vfs.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364
  1. // Package vfs provides local and remote filesystems support
  2. package vfs
  3. import (
  4. "errors"
  5. "fmt"
  6. "io"
  7. "net/url"
  8. "os"
  9. "path"
  10. "path/filepath"
  11. "runtime"
  12. "strings"
  13. "time"
  14. "github.com/eikenb/pipeat"
  15. "github.com/drakkan/sftpgo/logger"
  16. "github.com/drakkan/sftpgo/utils"
  17. )
// dirMimeType is the MIME type used to identify directories
// (presumably returned by Fs.GetMimeType implementations — callers are outside this chunk).
const dirMimeType = "inode/directory"

// validAzAccessTier lists the accepted values for AzBlobFsConfig.AccessTier.
// The empty string means no explicit tier is requested.
var validAzAccessTier = []string{"", "Archive", "Hot", "Cool"}
// Fs defines the interface for filesystem backends
type Fs interface {
	// Name returns the backend name.
	Name() string
	// ConnectionID returns the ID of the connection using this filesystem.
	ConnectionID() string
	// Stat returns the FileInfo describing the named file.
	Stat(name string) (os.FileInfo, error)
	// Lstat is like Stat but, for symlinks, describes the link itself.
	Lstat(name string) (os.FileInfo, error)
	// Open opens the named file for reading from the given offset.
	// Depending on the backend either a File or a pipe reader is returned,
	// together with a cancel function.
	Open(name string, offset int64) (File, *pipeat.PipeReaderAt, func(), error)
	// Create opens the named file for writing. Depending on the backend
	// either a File or a PipeWriter is returned, together with a cancel function.
	Create(name string, flag int) (File, *PipeWriter, func(), error)
	// Rename renames source to target.
	Rename(source, target string) error
	// Remove deletes the named file; isDir tells the backend whether
	// the target is a directory.
	Remove(name string, isDir bool) error
	// Mkdir creates a new directory.
	Mkdir(name string) error
	// Symlink creates target as a symbolic link to source.
	Symlink(source, target string) error
	// Chown changes the numeric uid and gid of the named file.
	Chown(name string, uid int, gid int) error
	// Chmod changes the mode of the named file.
	Chmod(name string, mode os.FileMode) error
	// Chtimes changes the access and modification times of the named file.
	Chtimes(name string, atime, mtime time.Time) error
	// Truncate changes the size of the named file.
	Truncate(name string, size int64) error
	// ReadDir reads the named directory and returns its entries.
	ReadDir(dirname string) ([]os.FileInfo, error)
	// Readlink returns the destination of the named symbolic link.
	Readlink(name string) (string, error)
	// IsUploadResumeSupported reports whether the backend can resume uploads.
	IsUploadResumeSupported() bool
	// IsAtomicUploadSupported reports whether the backend supports atomic uploads.
	IsAtomicUploadSupported() bool
	// CheckRootPath reports whether the root path is usable for the given
	// user (presumably creating it if needed — confirm against implementations).
	CheckRootPath(username string, uid int, gid int) bool
	// ResolvePath returns the backend path matching the given SFTP path.
	ResolvePath(sftpPath string) (string, error)
	// IsNotExist reports whether err indicates a missing file or directory.
	IsNotExist(err error) bool
	// IsPermission reports whether err indicates a permission error.
	IsPermission(err error) bool
	// IsNotSupported reports whether err indicates an unsupported operation.
	IsNotSupported(err error) bool
	// ScanRootDirContents returns the number of files and their total size
	// inside the root directory.
	ScanRootDirContents() (int, int64, error)
	// GetDirSize returns the number of files and their total size inside
	// the named directory.
	GetDirSize(dirname string) (int, int64, error)
	// GetAtomicUploadPath returns the temporary path to use for an atomic upload.
	GetAtomicUploadPath(name string) string
	// GetRelativePath returns the given name as a path relative to the
	// filesystem root.
	GetRelativePath(name string) string
	// Walk walks the file tree rooted at root, calling walkFn for each
	// file or directory.
	Walk(root string, walkFn filepath.WalkFunc) error
	// Join joins any number of path elements into a single path.
	Join(elem ...string) string
	// HasVirtualFolders reports whether the backend emulates folders.
	HasVirtualFolders() bool
	// GetMimeType returns the content type of the named file.
	GetMimeType(name string) (string, error)
}
// File defines an interface representing a SFTPGo file
type File interface {
	io.Reader
	io.Writer
	io.Closer
	io.ReaderAt
	io.WriterAt
	io.Seeker
	// Stat returns the FileInfo describing the file.
	Stat() (os.FileInfo, error)
	// Name returns the name of the file.
	Name() string
	// Truncate changes the size of the file.
	Truncate(size int64) error
}
// ErrVfsUnsupported defines the error for an unsupported VFS operation.
// NOTE(review): the message is capitalized, against Go convention; it is a
// runtime string callers may compare against, so it must not be changed here.
var ErrVfsUnsupported = errors.New("Not supported")
  68. // QuotaCheckResult defines the result for a quota check
  69. type QuotaCheckResult struct {
  70. HasSpace bool
  71. AllowedSize int64
  72. AllowedFiles int
  73. UsedSize int64
  74. UsedFiles int
  75. QuotaSize int64
  76. QuotaFiles int
  77. }
  78. // GetRemainingSize returns the remaining allowed size
  79. func (q *QuotaCheckResult) GetRemainingSize() int64 {
  80. if q.QuotaSize > 0 {
  81. return q.QuotaSize - q.UsedSize
  82. }
  83. return 0
  84. }
  85. // GetRemainingFiles returns the remaining allowed files
  86. func (q *QuotaCheckResult) GetRemainingFiles() int {
  87. if q.QuotaFiles > 0 {
  88. return q.QuotaFiles - q.UsedFiles
  89. }
  90. return 0
  91. }
// S3FsConfig defines the configuration for S3 based filesystem
type S3FsConfig struct {
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	Region    string `json:"region,omitempty"`
	// AccessKey and AccessSecret are the S3 credentials; both must be set
	// together or both left empty (see checkS3Credentials).
	AccessKey    string `json:"access_key,omitempty"`
	AccessSecret Secret `json:"access_secret,omitempty"`
	// Endpoint is an optional custom S3 endpoint.
	Endpoint string `json:"endpoint,omitempty"`
	// StorageClass to apply to uploaded objects.
	StorageClass string `json:"storage_class,omitempty"`
	// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
	// and if this value is set to zero, the default value (5MB) for the AWS SDK will be used.
	// The minimum allowed value is 5.
	// Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than
	// the upload bandwidth between SFTPGo and S3 then the SFTP client have to wait for the upload
	// of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
}
// GCSFsConfig defines the configuration for Google Cloud Storage based filesystem
type GCSFsConfig struct {
	Bucket string `json:"bucket,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole bucket contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// CredentialFile is not serialized to JSON; presumably it is the
	// on-disk path for the credentials (see ValidateGCSFsConfig, which
	// stats a credentials file path).
	CredentialFile string `json:"-"`
	Credentials    Secret `json:"credentials,omitempty"`
	// 0 explicit, 1 automatic
	AutomaticCredentials int `json:"automatic_credentials,omitempty"`
	// StorageClass to apply to uploaded objects.
	StorageClass string `json:"storage_class,omitempty"`
}
// AzBlobFsConfig defines the configuration for Azure Blob Storage based filesystem
type AzBlobFsConfig struct {
	Container string `json:"container,omitempty"`
	// Storage Account Name, leave blank to use SAS URL
	AccountName string `json:"account_name,omitempty"`
	// Storage Account Key leave blank to use SAS URL.
	// The access key is stored encrypted (AES-256-GCM)
	AccountKey Secret `json:"account_key,omitempty"`
	// Optional endpoint. Default is "blob.core.windows.net".
	// If you use the emulator the endpoint must include the protocol,
	// for example "http://127.0.0.1:10000"
	Endpoint string `json:"endpoint,omitempty"`
	// Shared access signature URL, leave blank if using account/key
	SASURL string `json:"sas_url,omitempty"`
	// KeyPrefix is similar to a chroot directory for local filesystem.
	// If specified then the SFTP user will only see objects that starts
	// with this prefix and so you can restrict access to a specific
	// folder. The prefix, if not empty, must not start with "/" and must
	// end with "/".
	// If empty the whole container contents will be available
	KeyPrefix string `json:"key_prefix,omitempty"`
	// The buffer size (in MB) to use for multipart uploads.
	// If this value is set to zero, the default value (1MB) for the Azure SDK will be used.
	// Please note that if the upload bandwidth between the SFTP client and SFTPGo server is
	// greater than the upload bandwidth between SFTPGo and Azure then the SFTP client have
	// to wait for the upload of the last parts to Azure after it ends the file upload to SFTPGo,
	// and it may time out.
	// Keep this in mind if you customize these parameters.
	UploadPartSize int64 `json:"upload_part_size,omitempty"`
	// How many parts are uploaded in parallel
	UploadConcurrency int `json:"upload_concurrency,omitempty"`
	// Set to true if you use an Azure emulator such as Azurite
	UseEmulator bool `json:"use_emulator,omitempty"`
	// Blob Access Tier; valid values are listed in validAzAccessTier
	AccessTier string `json:"access_tier,omitempty"`
}
// PipeWriter defines a wrapper for pipeat.PipeWriterAt.
type PipeWriter struct {
	writer *pipeat.PipeWriterAt
	err    error     // upload result, set by Done and returned by Close
	done   chan bool // signaled by Done to unblock Close
}
  176. // NewPipeWriter initializes a new PipeWriter
  177. func NewPipeWriter(w *pipeat.PipeWriterAt) *PipeWriter {
  178. return &PipeWriter{
  179. writer: w,
  180. err: nil,
  181. done: make(chan bool),
  182. }
  183. }
// Close waits for the upload to end, closes the pipeat.PipeWriterAt and returns an error if any.
func (p *PipeWriter) Close() error {
	p.writer.Close() //nolint:errcheck // the returned error is always null
	// block until Done signals that the upload ended and p.err is set
	<-p.done
	return p.err
}
// Done unlocks other goroutines waiting on Close().
// It must be called when the upload ends
func (p *PipeWriter) Done(err error) {
	// store the result before signaling so Close observes it after <-p.done
	p.err = err
	p.done <- true
}
// WriteAt is a wrapper for pipeat WriteAt
func (p *PipeWriter) WriteAt(data []byte, off int64) (int, error) {
	return p.writer.WriteAt(data, off)
}
// Write is a wrapper for pipeat Write
func (p *PipeWriter) Write(data []byte) (int, error) {
	return p.writer.Write(data)
}
  204. // IsDirectory checks if a path exists and is a directory
  205. func IsDirectory(fs Fs, path string) (bool, error) {
  206. fileInfo, err := fs.Stat(path)
  207. if err != nil {
  208. return false, err
  209. }
  210. return fileInfo.IsDir(), err
  211. }
// IsLocalOsFs returns true if fs is the local filesystem implementation
func IsLocalOsFs(fs Fs) bool {
	// osFsName is declared by the local filesystem implementation (not in this chunk)
	return fs.Name() == osFsName
}
  216. func checkS3Credentials(config *S3FsConfig) error {
  217. if config.AccessKey == "" && !config.AccessSecret.IsEmpty() {
  218. return errors.New("access_key cannot be empty with access_secret not empty")
  219. }
  220. if config.AccessSecret.IsEmpty() && config.AccessKey != "" {
  221. return errors.New("access_secret cannot be empty with access_key not empty")
  222. }
  223. if config.AccessSecret.IsEncrypted() && !config.AccessSecret.IsValid() {
  224. return errors.New("invalid encrypted access_secret")
  225. }
  226. if !config.AccessSecret.IsEmpty() && !config.AccessSecret.IsValidInput() {
  227. return errors.New("invalid access_secret")
  228. }
  229. return nil
  230. }
  231. // ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
  232. func ValidateS3FsConfig(config *S3FsConfig) error {
  233. if config.Bucket == "" {
  234. return errors.New("bucket cannot be empty")
  235. }
  236. if config.Region == "" {
  237. return errors.New("region cannot be empty")
  238. }
  239. if err := checkS3Credentials(config); err != nil {
  240. return err
  241. }
  242. if config.KeyPrefix != "" {
  243. if strings.HasPrefix(config.KeyPrefix, "/") {
  244. return errors.New("key_prefix cannot start with /")
  245. }
  246. config.KeyPrefix = path.Clean(config.KeyPrefix)
  247. if !strings.HasSuffix(config.KeyPrefix, "/") {
  248. config.KeyPrefix += "/"
  249. }
  250. }
  251. if config.UploadPartSize != 0 && (config.UploadPartSize < 5 || config.UploadPartSize > 5000) {
  252. return errors.New("upload_part_size cannot be != 0, lower than 5 (MB) or greater than 5000 (MB)")
  253. }
  254. if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
  255. return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
  256. }
  257. return nil
  258. }
  259. // ValidateGCSFsConfig returns nil if the specified GCS config is valid, otherwise an error
  260. func ValidateGCSFsConfig(config *GCSFsConfig, credentialsFilePath string) error {
  261. if config.Bucket == "" {
  262. return errors.New("bucket cannot be empty")
  263. }
  264. if config.KeyPrefix != "" {
  265. if strings.HasPrefix(config.KeyPrefix, "/") {
  266. return errors.New("key_prefix cannot start with /")
  267. }
  268. config.KeyPrefix = path.Clean(config.KeyPrefix)
  269. if !strings.HasSuffix(config.KeyPrefix, "/") {
  270. config.KeyPrefix += "/"
  271. }
  272. }
  273. if config.Credentials.IsEncrypted() && !config.Credentials.IsValid() {
  274. return errors.New("invalid encrypted credentials")
  275. }
  276. if !config.Credentials.IsValidInput() && config.AutomaticCredentials == 0 {
  277. fi, err := os.Stat(credentialsFilePath)
  278. if err != nil {
  279. return fmt.Errorf("invalid credentials %v", err)
  280. }
  281. if fi.Size() == 0 {
  282. return errors.New("credentials cannot be empty")
  283. }
  284. }
  285. return nil
  286. }
  287. // ValidateAzBlobFsConfig returns nil if the specified Azure Blob config is valid, otherwise an error
  288. func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
  289. if config.SASURL != "" {
  290. _, err := url.Parse(config.SASURL)
  291. return err
  292. }
  293. if config.Container == "" {
  294. return errors.New("container cannot be empty")
  295. }
  296. if config.AccountName == "" || !config.AccountKey.IsValidInput() {
  297. return errors.New("credentials cannot be empty or invalid")
  298. }
  299. if config.AccountKey.IsEncrypted() && !config.AccountKey.IsValid() {
  300. return errors.New("invalid encrypted account_key")
  301. }
  302. if config.KeyPrefix != "" {
  303. if strings.HasPrefix(config.KeyPrefix, "/") {
  304. return errors.New("key_prefix cannot start with /")
  305. }
  306. config.KeyPrefix = path.Clean(config.KeyPrefix)
  307. if !strings.HasSuffix(config.KeyPrefix, "/") {
  308. config.KeyPrefix += "/"
  309. }
  310. }
  311. if config.UploadPartSize < 0 || config.UploadPartSize > 100 {
  312. return fmt.Errorf("invalid upload part size: %v", config.UploadPartSize)
  313. }
  314. if config.UploadConcurrency < 0 || config.UploadConcurrency > 64 {
  315. return fmt.Errorf("invalid upload concurrency: %v", config.UploadConcurrency)
  316. }
  317. if !utils.IsStringInSlice(config.AccessTier, validAzAccessTier) {
  318. return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", config.AccessTier, strings.Join(validAzAccessTier, ", "))
  319. }
  320. return nil
  321. }
  322. // SetPathPermissions calls fs.Chown.
  323. // It does nothing for local filesystem on windows
  324. func SetPathPermissions(fs Fs, path string, uid int, gid int) {
  325. if IsLocalOsFs(fs) {
  326. if runtime.GOOS == "windows" {
  327. return
  328. }
  329. }
  330. if err := fs.Chown(path, uid, gid); err != nil {
  331. fsLog(fs, logger.LevelWarn, "error chowning path %v: %v", path, err)
  332. }
  333. }
// fsLog logs a message at the given level, adding the filesystem name and
// the connection ID as context.
func fsLog(fs Fs, level logger.LogLevel, format string, v ...interface{}) {
	logger.Log(level, fs.Name(), fs.ConnectionID(), format, v...)
}