- // Copyright (C) 2019-2022 Nicola Murino
- //
- // This program is free software: you can redistribute it and/or modify
- // it under the terms of the GNU Affero General Public License as published
- // by the Free Software Foundation, version 3.
- //
- // This program is distributed in the hope that it will be useful,
- // but WITHOUT ANY WARRANTY; without even the implied warranty of
- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU Affero General Public License for more details.
- //
- // You should have received a copy of the GNU Affero General Public License
- // along with this program. If not, see <https://www.gnu.org/licenses/>.
- package common
- import (
- "bytes"
- "context"
- "encoding/csv"
- "errors"
- "fmt"
- "io"
- "mime"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
- "github.com/klauspost/compress/zip"
- "github.com/robfig/cron/v3"
- "github.com/rs/xid"
- "github.com/sftpgo/sdk"
- mail "github.com/xhit/go-simple-mail/v2"
- "github.com/drakkan/sftpgo/v2/internal/dataprovider"
- "github.com/drakkan/sftpgo/v2/internal/logger"
- "github.com/drakkan/sftpgo/v2/internal/plugin"
- "github.com/drakkan/sftpgo/v2/internal/smtp"
- "github.com/drakkan/sftpgo/v2/internal/util"
- "github.com/drakkan/sftpgo/v2/internal/vfs"
- )
- const (
- ipBlockedEventName = "IP Blocked"
- maxAttachmentsSize = int64(10 * 1024 * 1024)
- )
- var (
- // eventManager handles the supported event rules actions
- eventManager eventRulesContainer
- multipartQuoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
- )
- func init() {
- eventManager = eventRulesContainer{
- schedulesMapping: make(map[string][]cron.EntryID),
- // arbitrary maximum number of concurrent asynchronous tasks,
- // each task could execute multiple actions
- concurrencyGuard: make(chan struct{}, 200),
- }
- dataprovider.SetEventRulesCallbacks(eventManager.loadRules, eventManager.RemoveRule,
- func(operation, executor, ip, objectType, objectName string, object plugin.Renderer) {
- eventManager.handleProviderEvent(EventParams{
- Name: executor,
- ObjectName: objectName,
- Event: operation,
- Status: 1,
- ObjectType: objectType,
- IP: ip,
- Timestamp: time.Now().UnixNano(),
- Object: object,
- })
- })
- }
- // HandleCertificateEvent checks and executes action rules for certificate events
- func HandleCertificateEvent(params EventParams) {
- eventManager.handleCertificateEvent(params)
- }
- // eventRulesContainer stores event rules by trigger
- type eventRulesContainer struct {
- sync.RWMutex
- lastLoad atomic.Int64
- FsEvents []dataprovider.EventRule
- ProviderEvents []dataprovider.EventRule
- Schedules []dataprovider.EventRule
- IPBlockedEvents []dataprovider.EventRule
- CertificateEvents []dataprovider.EventRule
- schedulesMapping map[string][]cron.EntryID
- concurrencyGuard chan struct{}
- }
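- // addAsyncTask increments the active hooks counter and acquires a slot from the concurrency guard, blocking if the maximum number of concurrent asynchronous tasks is already running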
- func (r *eventRulesContainer) addAsyncTask() {
- activeHooks.Add(1)
- r.concurrencyGuard <- struct{}{}
- }
- func (r *eventRulesContainer) removeAsyncTask() {
- activeHooks.Add(-1)
- <-r.concurrencyGuard
- }
- func (r *eventRulesContainer) getLastLoadTime() int64 {
- return r.lastLoad.Load()
- }
- func (r *eventRulesContainer) setLastLoadTime(modTime int64) {
- r.lastLoad.Store(modTime)
- }
- // RemoveRule deletes the rule with the specified name
- func (r *eventRulesContainer) RemoveRule(name string) {
- r.Lock()
- defer r.Unlock()
- r.removeRuleInternal(name)
- eventManagerLog(logger.LevelDebug, "event rules updated after delete, fs events: %d, provider events: %d, schedules: %d",
- len(r.FsEvents), len(r.ProviderEvents), len(r.Schedules))
- }
- func (r *eventRulesContainer) removeRuleInternal(name string) {
- for idx := range r.FsEvents {
- if r.FsEvents[idx].Name == name {
- lastIdx := len(r.FsEvents) - 1
- r.FsEvents[idx] = r.FsEvents[lastIdx]
- r.FsEvents = r.FsEvents[:lastIdx]
- eventManagerLog(logger.LevelDebug, "removed rule %q from fs events", name)
- return
- }
- }
- for idx := range r.ProviderEvents {
- if r.ProviderEvents[idx].Name == name {
- lastIdx := len(r.ProviderEvents) - 1
- r.ProviderEvents[idx] = r.ProviderEvents[lastIdx]
- r.ProviderEvents = r.ProviderEvents[:lastIdx]
- eventManagerLog(logger.LevelDebug, "removed rule %q from provider events", name)
- return
- }
- }
- for idx := range r.IPBlockedEvents {
- if r.IPBlockedEvents[idx].Name == name {
- lastIdx := len(r.IPBlockedEvents) - 1
- r.IPBlockedEvents[idx] = r.IPBlockedEvents[lastIdx]
- r.IPBlockedEvents = r.IPBlockedEvents[:lastIdx]
- eventManagerLog(logger.LevelDebug, "removed rule %q from IP blocked events", name)
- return
- }
- }
- for idx := range r.CertificateEvents {
- if r.CertificateEvents[idx].Name == name {
- lastIdx := len(r.CertificateEvents) - 1
- r.CertificateEvents[idx] = r.CertificateEvents[lastIdx]
- r.CertificateEvents = r.CertificateEvents[:lastIdx]
- eventManagerLog(logger.LevelDebug, "removed rule %q from certificate events", name)
- return
- }
- }
- for idx := range r.Schedules {
- if r.Schedules[idx].Name == name {
- if schedules, ok := r.schedulesMapping[name]; ok {
- for _, entryID := range schedules {
- eventManagerLog(logger.LevelDebug, "removing scheduled entry id %d for rule %q", entryID, name)
- eventScheduler.Remove(entryID)
- }
- delete(r.schedulesMapping, name)
- }
- lastIdx := len(r.Schedules) - 1
- r.Schedules[idx] = r.Schedules[lastIdx]
- r.Schedules = r.Schedules[:lastIdx]
- eventManagerLog(logger.LevelDebug, "removed rule %q from scheduled events", name)
- return
- }
- }
- }
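- // addUpdateRuleInternal removes any existing rule with the same name and re-adds it to the list matching its trigger. Rules marked as deleted are not re-added and, after a grace period, are removed from the data provider. The caller must hold the write lock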
- func (r *eventRulesContainer) addUpdateRuleInternal(rule dataprovider.EventRule) {
- r.removeRuleInternal(rule.Name)
- if rule.DeletedAt > 0 {
- deletedAt := util.GetTimeFromMsecSinceEpoch(rule.DeletedAt)
- if deletedAt.Add(30 * time.Minute).Before(time.Now()) {
- eventManagerLog(logger.LevelDebug, "removing rule %q deleted at %s", rule.Name, deletedAt)
- go dataprovider.RemoveEventRule(rule) //nolint:errcheck
- }
- return
- }
- switch rule.Trigger {
- case dataprovider.EventTriggerFsEvent:
- r.FsEvents = append(r.FsEvents, rule)
- eventManagerLog(logger.LevelDebug, "added rule %q to fs events", rule.Name)
- case dataprovider.EventTriggerProviderEvent:
- r.ProviderEvents = append(r.ProviderEvents, rule)
- eventManagerLog(logger.LevelDebug, "added rule %q to provider events", rule.Name)
- case dataprovider.EventTriggerIPBlocked:
- r.IPBlockedEvents = append(r.IPBlockedEvents, rule)
- eventManagerLog(logger.LevelDebug, "added rule %q to IP blocked events", rule.Name)
- case dataprovider.EventTriggerCertificate:
- r.CertificateEvents = append(r.CertificateEvents, rule)
- eventManagerLog(logger.LevelDebug, "added rule %q to certificate events", rule.Name)
- case dataprovider.EventTriggerSchedule:
- for _, schedule := range rule.Conditions.Schedules {
- cronSpec := schedule.GetCronSpec()
- job := &eventCronJob{
- ruleName: dataprovider.ConvertName(rule.Name),
- }
- entryID, err := eventScheduler.AddJob(cronSpec, job)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to add scheduled rule %q, cron string %q: %v", rule.Name, cronSpec, err)
- return
- }
- r.schedulesMapping[rule.Name] = append(r.schedulesMapping[rule.Name], entryID)
- eventManagerLog(logger.LevelDebug, "schedule for rule %q added, id: %d, cron string %q, active scheduling rules: %d",
- rule.Name, entryID, cronSpec, len(r.schedulesMapping))
- }
- r.Schedules = append(r.Schedules, rule)
- eventManagerLog(logger.LevelDebug, "added rule %q to scheduled events", rule.Name)
- default:
- eventManagerLog(logger.LevelError, "unsupported trigger: %d", rule.Trigger)
- }
- }
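- // loadRules fetches the rules updated since the last load time and refreshes the in-memory lists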
- func (r *eventRulesContainer) loadRules() {
- eventManagerLog(logger.LevelDebug, "loading updated rules")
- modTime := util.GetTimeAsMsSinceEpoch(time.Now())
- rules, err := dataprovider.GetRecentlyUpdatedRules(r.getLastLoadTime())
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to load event rules: %v", err)
- return
- }
- eventManagerLog(logger.LevelDebug, "recently updated event rules loaded: %d", len(rules))
- if len(rules) > 0 {
- r.Lock()
- defer r.Unlock()
- for _, rule := range rules {
- r.addUpdateRuleInternal(rule)
- }
- }
- eventManagerLog(logger.LevelDebug, "event rules updated, fs events: %d, provider events: %d, schedules: %d, ip blocked events: %d, certificate events: %d",
- len(r.FsEvents), len(r.ProviderEvents), len(r.Schedules), len(r.IPBlockedEvents), len(r.CertificateEvents))
- r.setLastLoadTime(modTime)
- }
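- // checkProviderEventMatch returns true if the provider event and the object name/type match the rule conditions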
- func (r *eventRulesContainer) checkProviderEventMatch(conditions dataprovider.EventConditions, params EventParams) bool {
- if !util.Contains(conditions.ProviderEvents, params.Event) {
- return false
- }
- if !checkEventConditionPatterns(params.Name, conditions.Options.Names) {
- return false
- }
- if len(conditions.Options.ProviderObjects) > 0 && !util.Contains(conditions.Options.ProviderObjects, params.ObjectType) {
- return false
- }
- return true
- }
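- // checkFsEventMatch returns true if the filesystem event matches the rule conditions: event type, names, groups, paths, protocols and, for uploads/downloads, file size limits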
- func (r *eventRulesContainer) checkFsEventMatch(conditions dataprovider.EventConditions, params EventParams) bool {
- if !util.Contains(conditions.FsEvents, params.Event) {
- return false
- }
- if !checkEventConditionPatterns(params.Name, conditions.Options.Names) {
- return false
- }
- if !checkEventGroupConditionPatters(params.Groups, conditions.Options.GroupNames) {
- return false
- }
- if !checkEventConditionPatterns(params.VirtualPath, conditions.Options.FsPaths) {
- if !checkEventConditionPatterns(params.ObjectName, conditions.Options.FsPaths) {
- return false
- }
- }
- if len(conditions.Options.Protocols) > 0 && !util.Contains(conditions.Options.Protocols, params.Protocol) {
- return false
- }
- if params.Event == operationUpload || params.Event == operationDownload {
- if conditions.Options.MinFileSize > 0 {
- if params.FileSize < conditions.Options.MinFileSize {
- return false
- }
- }
- if conditions.Options.MaxFileSize > 0 {
- if params.FileSize > conditions.Options.MaxFileSize {
- return false
- }
- }
- }
- return true
- }
- // hasFsRules returns true if there are any rules for filesystem event triggers
- func (r *eventRulesContainer) hasFsRules() bool {
- r.RLock()
- defer r.RUnlock()
- return len(r.FsEvents) > 0
- }
- // handleFsEvent executes the rules actions defined for the specified event
- func (r *eventRulesContainer) handleFsEvent(params EventParams) error {
- if params.Protocol == protocolEventAction {
- return nil
- }
- r.RLock()
- var rulesWithSyncActions, rulesAsync []dataprovider.EventRule
- for _, rule := range r.FsEvents {
- if r.checkFsEventMatch(rule.Conditions, params) {
- if err := rule.CheckActionsConsistency(""); err != nil {
- eventManagerLog(logger.LevelWarn, "rule %q skipped: %v, event %q",
- rule.Name, err, params.Event)
- continue
- }
- hasSyncActions := false
- for _, action := range rule.Actions {
- if action.Options.ExecuteSync {
- hasSyncActions = true
- break
- }
- }
- if hasSyncActions {
- rulesWithSyncActions = append(rulesWithSyncActions, rule)
- } else {
- rulesAsync = append(rulesAsync, rule)
- }
- }
- }
- r.RUnlock()
- params.sender = params.Name
- if len(rulesAsync) > 0 {
- go executeAsyncRulesActions(rulesAsync, params)
- }
- if len(rulesWithSyncActions) > 0 {
- return executeSyncRulesActions(rulesWithSyncActions, params)
- }
- return nil
- }
- // handleProviderEvent executes the rules actions defined for provider events; the username is populated for user objects
- func (r *eventRulesContainer) handleProviderEvent(params EventParams) {
- r.RLock()
- defer r.RUnlock()
- var rules []dataprovider.EventRule
- for _, rule := range r.ProviderEvents {
- if r.checkProviderEventMatch(rule.Conditions, params) {
- if err := rule.CheckActionsConsistency(params.ObjectType); err == nil {
- rules = append(rules, rule)
- } else {
- eventManagerLog(logger.LevelWarn, "rule %q skipped: %v, event %q object type %q",
- rule.Name, err, params.Event, params.ObjectType)
- }
- }
- }
- if len(rules) > 0 {
- params.sender = params.ObjectName
- go executeAsyncRulesActions(rules, params)
- }
- }
- func (r *eventRulesContainer) handleIPBlockedEvent(params EventParams) {
- r.RLock()
- defer r.RUnlock()
- if len(r.IPBlockedEvents) == 0 {
- return
- }
- var rules []dataprovider.EventRule
- for _, rule := range r.IPBlockedEvents {
- if err := rule.CheckActionsConsistency(""); err == nil {
- rules = append(rules, rule)
- } else {
- eventManagerLog(logger.LevelWarn, "rule %q skipped: %v, event %q",
- rule.Name, err, params.Event)
- }
- }
- if len(rules) > 0 {
- go executeAsyncRulesActions(rules, params)
- }
- }
- func (r *eventRulesContainer) handleCertificateEvent(params EventParams) {
- r.RLock()
- defer r.RUnlock()
- if len(r.CertificateEvents) == 0 {
- return
- }
- var rules []dataprovider.EventRule
- for _, rule := range r.CertificateEvents {
- if err := rule.CheckActionsConsistency(""); err == nil {
- rules = append(rules, rule)
- } else {
- eventManagerLog(logger.LevelWarn, "rule %q skipped: %v, event %q",
- rule.Name, err, params.Event)
- }
- }
- if len(rules) > 0 {
- go executeAsyncRulesActions(rules, params)
- }
- }
- type executedRetentionCheck struct {
- Username string
- ActionName string
- Results []folderRetentionCheckResult
- }
- // EventParams defines the supported event parameters
- type EventParams struct {
- Name string
- Groups []sdk.GroupMapping
- Event string
- Status int
- VirtualPath string
- FsPath string
- VirtualTargetPath string
- FsTargetPath string
- ObjectName string
- ObjectType string
- FileSize int64
- Protocol string
- IP string
- Timestamp int64
- Object plugin.Renderer
- sender string
- updateStatusFromError bool
- errors []string
- retentionChecks []executedRetentionCheck
- }
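- // getACopy returns a copy of the event params with the errors and retention check results duplicated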
- func (p *EventParams) getACopy() *EventParams {
- params := *p
- params.errors = make([]string, len(p.errors))
- copy(params.errors, p.errors)
- retentionChecks := make([]executedRetentionCheck, 0, len(p.retentionChecks))
- for _, c := range p.retentionChecks {
- executedCheck := executedRetentionCheck{
- Username: c.Username,
- ActionName: c.ActionName,
- }
- executedCheck.Results = make([]folderRetentionCheckResult, len(c.Results))
- copy(executedCheck.Results, c.Results)
- retentionChecks = append(retentionChecks, executedCheck)
- }
- params.retentionChecks = retentionChecks
- return &params
- }
- // AddError adds a new error to the event params and updates the status if needed
- func (p *EventParams) AddError(err error) {
- if err == nil {
- return
- }
- if p.updateStatusFromError && p.Status == 1 {
- p.Status = 2
- }
- p.errors = append(p.errors, err.Error())
- }
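- // setBackupParams fills the event params for a backup file if no sender is already set: the system executor becomes the sender and the backup path and size are recorded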
- func (p *EventParams) setBackupParams(backupPath string) {
- if p.sender != "" {
- return
- }
- p.sender = dataprovider.ActionExecutorSystem
- p.FsPath = backupPath
- p.ObjectName = filepath.Base(backupPath)
- p.VirtualPath = "/" + p.ObjectName
- p.Timestamp = time.Now().UnixNano()
- info, err := os.Stat(backupPath)
- if err == nil {
- p.FileSize = info.Size()
- }
- }
- func (p *EventParams) getStatusString() string {
- switch p.Status {
- case 1:
- return "OK"
- default:
- return "KO"
- }
- }
- // getUsers returns users with group settings not applied
- func (p *EventParams) getUsers() ([]dataprovider.User, error) {
- if p.sender == "" {
- users, err := dataprovider.DumpUsers()
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to get users: %+v", err)
- return users, errors.New("unable to get users")
- }
- return users, nil
- }
- user, err := p.getUserFromSender()
- if err != nil {
- return nil, err
- }
- return []dataprovider.User{user}, nil
- }
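- // getUserFromSender returns the user associated with the sender; for system events a virtual user rooted at the backups path with full permissions is returned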
- func (p *EventParams) getUserFromSender() (dataprovider.User, error) {
- if p.sender == dataprovider.ActionExecutorSystem {
- return dataprovider.User{
- BaseUser: sdk.BaseUser{
- Status: 1,
- Username: p.sender,
- HomeDir: dataprovider.GetBackupsPath(),
- Permissions: map[string][]string{
- "/": {dataprovider.PermAny},
- },
- },
- }, nil
- }
- user, err := dataprovider.UserExists(p.sender)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to get user %q: %+v", p.sender, err)
- return user, fmt.Errorf("error getting user %q", p.sender)
- }
- return user, nil
- }
- func (p *EventParams) getFolders() ([]vfs.BaseVirtualFolder, error) {
- if p.sender == "" {
- return dataprovider.DumpFolders()
- }
- folder, err := dataprovider.GetFolderByName(p.sender)
- if err != nil {
- return nil, fmt.Errorf("error getting folder %q: %w", p.sender, err)
- }
- return []vfs.BaseVirtualFolder{folder}, nil
- }
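- // getCompressedDataRetentionReport returns a zip archive containing one CSV report per executed retention check, enforcing the maximum attachments size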
- func (p *EventParams) getCompressedDataRetentionReport() ([]byte, error) {
- if len(p.retentionChecks) == 0 {
- return nil, errors.New("no data retention report available")
- }
- var b bytes.Buffer
- wr := zip.NewWriter(&b)
- for _, check := range p.retentionChecks {
- if size := int64(len(b.Bytes())); size > maxAttachmentsSize {
- eventManagerLog(logger.LevelError, "unable to get retention report, size too large: %s", util.ByteCountIEC(size))
- return nil, fmt.Errorf("unable to get retention report, size too large: %s", util.ByteCountIEC(size))
- }
- data, err := getCSVRetentionReport(check.Results)
- if err != nil {
- return nil, fmt.Errorf("unable to get CSV report: %w", err)
- }
- fh := &zip.FileHeader{
- Name: fmt.Sprintf("%s-%s.csv", check.ActionName, check.Username),
- Method: zip.Deflate,
- Modified: time.Now().UTC(),
- }
- f, err := wr.CreateHeader(fh)
- if err != nil {
- return nil, fmt.Errorf("unable to create zip header for file %q: %w", fh.Name, err)
- }
- _, err = io.Copy(f, bytes.NewBuffer(data))
- if err != nil {
- return nil, fmt.Errorf("unable to write content to zip file %q: %w", fh.Name, err)
- }
- }
- if err := wr.Close(); err != nil {
- return nil, fmt.Errorf("unable to close zip writer: %w", err)
- }
- return b.Bytes(), nil
- }
- func (p *EventParams) getRetentionReportsAsMailAttachment() (mail.File, error) {
- var result mail.File
- data, err := p.getCompressedDataRetentionReport()
- if err != nil {
- return result, err
- }
- result.Name = "retention-reports.zip"
- result.Data = data
- return result, nil
- }
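- // getStringReplacements returns the placeholder/value pairs used to expand {{...}} templates in action configurations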
- func (p *EventParams) getStringReplacements(addObjectData bool) []string {
- replacements := []string{
- "{{Name}}", p.Name,
- "{{Event}}", p.Event,
- "{{Status}}", fmt.Sprintf("%d", p.Status),
- "{{VirtualPath}}", p.VirtualPath,
- "{{FsPath}}", p.FsPath,
- "{{VirtualTargetPath}}", p.VirtualTargetPath,
- "{{FsTargetPath}}", p.FsTargetPath,
- "{{ObjectName}}", p.ObjectName,
- "{{ObjectType}}", p.ObjectType,
- "{{FileSize}}", fmt.Sprintf("%d", p.FileSize),
- "{{Protocol}}", p.Protocol,
- "{{IP}}", p.IP,
- "{{Timestamp}}", fmt.Sprintf("%d", p.Timestamp),
- "{{StatusString}}", p.getStatusString(),
- }
- if p.VirtualPath != "" {
- replacements = append(replacements, "{{VirtualDirPath}}", path.Dir(p.VirtualPath))
- }
- if p.VirtualTargetPath != "" {
- replacements = append(replacements, "{{VirtualTargetDirPath}}", path.Dir(p.VirtualTargetPath))
- replacements = append(replacements, "{{TargetName}}", path.Base(p.VirtualTargetPath))
- }
- if len(p.errors) > 0 {
- replacements = append(replacements, "{{ErrorString}}", strings.Join(p.errors, ", "))
- } else {
- replacements = append(replacements, "{{ErrorString}}", "")
- }
- replacements = append(replacements, "{{ObjectData}}", "")
- if addObjectData {
- data, err := p.Object.RenderAsJSON(p.Event != operationDelete)
- if err == nil {
- replacements[len(replacements)-1] = string(data)
- }
- }
- return replacements
- }
- func getCSVRetentionReport(results []folderRetentionCheckResult) ([]byte, error) {
- var b bytes.Buffer
- csvWriter := csv.NewWriter(&b)
- err := csvWriter.Write([]string{"path", "retention (hours)", "deleted files", "deleted size (bytes)",
- "elapsed (ms)", "info", "error"})
- if err != nil {
- return nil, err
- }
- for _, result := range results {
- err = csvWriter.Write([]string{result.Path, strconv.Itoa(result.Retention), strconv.Itoa(result.DeletedFiles),
- strconv.FormatInt(result.DeletedSize, 10), strconv.FormatInt(result.Elapsed.Milliseconds(), 10),
- result.Info, result.Error})
- if err != nil {
- return nil, err
- }
- }
- csvWriter.Flush()
- err = csvWriter.Error()
- return b.Bytes(), err
- }
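- // closeWriterAndUpdateQuota closes the writer, updates the user/folder quota for the written file and triggers the upload action notification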
- func closeWriterAndUpdateQuota(w io.WriteCloser, conn *BaseConnection, virtualPath string, numFiles int,
- truncatedSize int64, errTransfer error,
- ) error {
- errWrite := w.Close()
- info, err := conn.doStatInternal(virtualPath, 0, false, false)
- if err == nil {
- updateUserQuotaAfterFileWrite(conn, virtualPath, numFiles, info.Size()-truncatedSize)
- _, fsPath, errFs := conn.GetFsAndResolvedPath(virtualPath)
- if errFs == nil {
- if errTransfer == nil {
- errTransfer = errWrite
- }
- ExecuteActionNotification(conn, operationUpload, fsPath, virtualPath, "", "", "", info.Size(), errTransfer) //nolint:errcheck
- }
- } else {
- eventManagerLog(logger.LevelWarn, "unable to update quota after writing %q: %v", virtualPath, err)
- }
- return errWrite
- }
- func updateUserQuotaAfterFileWrite(conn *BaseConnection, virtualPath string, numFiles int, fileSize int64) {
- vfolder, err := conn.User.GetVirtualFolderForPath(path.Dir(virtualPath))
- if err != nil {
- dataprovider.UpdateUserQuota(&conn.User, numFiles, fileSize, false) //nolint:errcheck
- return
- }
- dataprovider.UpdateVirtualFolderQuota(&vfolder.BaseVirtualFolder, numFiles, fileSize, false) //nolint:errcheck
- if vfolder.IsIncludedInUserQuota() {
- dataprovider.UpdateUserQuota(&conn.User, numFiles, fileSize, false) //nolint:errcheck
- }
- }
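- // getFileWriter returns a writer for the given virtual path, the number of new files (0 on overwrite), the truncated size and a cancel function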
- func getFileWriter(conn *BaseConnection, virtualPath string) (io.WriteCloser, int, int64, func(), error) {
- fs, fsPath, err := conn.GetFsAndResolvedPath(virtualPath)
- if err != nil {
- return nil, 0, 0, nil, err
- }
- var truncatedSize, fileSize int64
- numFiles := 1
- isFileOverwrite := false
- info, err := fs.Lstat(fsPath)
- if err == nil {
- fileSize = info.Size()
- if info.IsDir() {
- return nil, numFiles, truncatedSize, nil, fmt.Errorf("cannot write to a directory: %q", virtualPath)
- }
- if info.Mode().IsRegular() {
- isFileOverwrite = true
- truncatedSize = fileSize
- }
- numFiles = 0
- }
- if err != nil && !fs.IsNotExist(err) {
- return nil, numFiles, truncatedSize, nil, conn.GetFsError(fs, err)
- }
- f, w, cancelFn, err := fs.Create(fsPath, 0, conn.GetCreateChecks(virtualPath, numFiles == 1))
- if err != nil {
- return nil, numFiles, truncatedSize, nil, conn.GetFsError(fs, err)
- }
- vfs.SetPathPermissions(fs, fsPath, conn.User.GetUID(), conn.User.GetGID())
- if isFileOverwrite {
- if vfs.HasTruncateSupport(fs) || vfs.IsCryptOsFs(fs) {
- updateUserQuotaAfterFileWrite(conn, virtualPath, numFiles, -fileSize)
- truncatedSize = 0
- }
- }
- if cancelFn == nil {
- cancelFn = func() {}
- }
- if f != nil {
- return f, numFiles, truncatedSize, cancelFn, nil
- }
- return w, numFiles, truncatedSize, cancelFn, nil
- }
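- // addZipEntry recursively adds the given path to the archive, skipping the archive itself, duplicate entries and non regular files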
- func addZipEntry(wr *zipWriterWrapper, conn *BaseConnection, entryPath, baseDir string) error {
- if entryPath == wr.Name {
- // skip the archive itself
- return nil
- }
- info, err := conn.DoStat(entryPath, 1, false)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to add zip entry %#v, stat error: %v", entryPath, err)
- return err
- }
- entryName, err := getZipEntryName(entryPath, baseDir)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to get zip entry name: %v", err)
- return err
- }
- if _, ok := wr.Entries[entryName]; ok {
- eventManagerLog(logger.LevelInfo, "skipping duplicate zip entry %q, is dir %t", entryPath, info.IsDir())
- return nil
- }
- wr.Entries[entryName] = true
- if info.IsDir() {
- _, err = wr.Writer.CreateHeader(&zip.FileHeader{
- Name: entryName + "/",
- Method: zip.Deflate,
- Modified: info.ModTime(),
- })
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to create zip entry %q: %v", entryPath, err)
- return fmt.Errorf("unable to create zip entry %q: %w", entryPath, err)
- }
- contents, err := conn.ListDir(entryPath)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to add zip entry %q, read dir error: %v", entryPath, err)
- return fmt.Errorf("unable to add zip entry %q: %w", entryPath, err)
- }
- for _, info := range contents {
- fullPath := util.CleanPath(path.Join(entryPath, info.Name()))
- if err := addZipEntry(wr, conn, fullPath, baseDir); err != nil {
- eventManagerLog(logger.LevelError, "unable to add zip entry: %v", err)
- return err
- }
- }
- return nil
- }
- if !info.Mode().IsRegular() {
- // we only allow regular files
- eventManagerLog(logger.LevelInfo, "skipping zip entry for non regular file %q", entryPath)
- return nil
- }
- reader, cancelFn, err := getFileReader(conn, entryPath)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to add zip entry %q, cannot open file: %v", entryPath, err)
- return fmt.Errorf("unable to open %q: %w", entryPath, err)
- }
- defer cancelFn()
- defer reader.Close()
- f, err := wr.Writer.CreateHeader(&zip.FileHeader{
- Name: entryName,
- Method: zip.Deflate,
- Modified: info.ModTime(),
- })
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to create zip entry %q: %v", entryPath, err)
- return fmt.Errorf("unable to create zip entry %q: %w", entryPath, err)
- }
- _, err = io.Copy(f, reader)
- return err
- }
- func getZipEntryName(entryPath, baseDir string) (string, error) {
- if !strings.HasPrefix(entryPath, baseDir) {
- return "", fmt.Errorf("entry path %q is outside base dir %q", entryPath, baseDir)
- }
- entryPath = strings.TrimPrefix(entryPath, baseDir)
- return strings.TrimPrefix(entryPath, "/"), nil
- }
- func getFileReader(conn *BaseConnection, virtualPath string) (io.ReadCloser, func(), error) {
- fs, fsPath, err := conn.GetFsAndResolvedPath(virtualPath)
- if err != nil {
- return nil, nil, err
- }
- f, r, cancelFn, err := fs.Open(fsPath, 0)
- if err != nil {
- return nil, nil, conn.GetFsError(fs, err)
- }
- if cancelFn == nil {
- cancelFn = func() {}
- }
- if f != nil {
- return f, cancelFn, nil
- }
- return r, cancelFn, nil
- }
- func writeFileContent(conn *BaseConnection, virtualPath string, w io.Writer) error {
- reader, cancelFn, err := getFileReader(conn, virtualPath)
- if err != nil {
- return err
- }
- defer cancelFn()
- defer reader.Close()
- _, err = io.Copy(w, reader)
- return err
- }
- func getFileContent(conn *BaseConnection, virtualPath string, expectedSize int) ([]byte, error) {
- reader, cancelFn, err := getFileReader(conn, virtualPath)
- if err != nil {
- return nil, err
- }
- defer cancelFn()
- defer reader.Close()
- data := make([]byte, expectedSize)
- _, err = io.ReadFull(reader, data)
- return data, err
- }
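- // getMailAttachments reads the given virtual paths, after placeholder replacement, and returns them as email attachments enforcing the maximum total size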
- func getMailAttachments(user dataprovider.User, attachments []string, replacer *strings.Replacer) ([]mail.File, error) {
- var files []mail.File
- user, err := getUserForEventAction(user)
- if err != nil {
- return nil, err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return nil, fmt.Errorf("error getting email attachments, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- totalSize := int64(0)
- for _, virtualPath := range replacePathsPlaceholders(attachments, replacer) {
- info, err := conn.DoStat(virtualPath, 0, false)
- if err != nil {
- return nil, fmt.Errorf("unable to get info for file %q, user %q: %w", virtualPath, conn.User.Username, err)
- }
- if !info.Mode().IsRegular() {
- return nil, fmt.Errorf("cannot attach non regular file %q", virtualPath)
- }
- totalSize += info.Size()
- if totalSize > maxAttachmentsSize {
- return nil, fmt.Errorf("unable to send files as attachment, size too large: %s", util.ByteCountIEC(totalSize))
- }
- data, err := getFileContent(conn, virtualPath, int(info.Size()))
- if err != nil {
- return nil, fmt.Errorf("unable to get content for file %q, user %q: %w", virtualPath, conn.User.Username, err)
- }
- files = append(files, mail.File{
- Name: path.Base(virtualPath),
- Data: data,
- })
- }
- return files, nil
- }
- func replaceWithReplacer(input string, replacer *strings.Replacer) string {
- if !strings.Contains(input, "{{") {
- return input
- }
- return replacer.Replace(input)
- }
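- // checkEventConditionPattern reports whether the name matches the pattern, honoring the inverse match flag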
- func checkEventConditionPattern(p dataprovider.ConditionPattern, name string) bool {
- matched, err := path.Match(p.Pattern, name)
- if err != nil {
- eventManagerLog(logger.LevelError, "pattern matching error %q, err: %v", p.Pattern, err)
- return false
- }
- if p.InverseMatch {
- return !matched
- }
- return matched
- }
- // checkEventConditionPatterns returns false if patterns are defined and no match is found
- func checkEventConditionPatterns(name string, patterns []dataprovider.ConditionPattern) bool {
- if len(patterns) == 0 {
- return true
- }
- for _, p := range patterns {
- if checkEventConditionPattern(p, name) {
- return true
- }
- }
- return false
- }
- func checkEventGroupConditionPatters(groups []sdk.GroupMapping, patterns []dataprovider.ConditionPattern) bool {
- if len(patterns) == 0 {
- return true
- }
- for _, group := range groups {
- for _, p := range patterns {
- if checkEventConditionPattern(p, group.Name) {
- return true
- }
- }
- }
- return false
- }
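- // getHTTPRuleActionEndpoint returns the configured endpoint with query parameters added after placeholder replacement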
- func getHTTPRuleActionEndpoint(c dataprovider.EventActionHTTPConfig, replacer *strings.Replacer) (string, error) {
- if len(c.QueryParameters) > 0 {
- u, err := url.Parse(c.Endpoint)
- if err != nil {
- return "", fmt.Errorf("invalid endpoint: %w", err)
- }
- q := u.Query()
- for _, keyVal := range c.QueryParameters {
- q.Add(keyVal.Key, replaceWithReplacer(keyVal.Value, replacer))
- }
- u.RawQuery = q.Encode()
- return u.String(), nil
- }
- return c.Endpoint, nil
- }
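- // writeHTTPPart writes a single multipart section: an inline body, the compressed retention report or the content of a file read through the user connection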
- func writeHTTPPart(m *multipart.Writer, part dataprovider.HTTPPart, h textproto.MIMEHeader,
- conn *BaseConnection, replacer *strings.Replacer, params *EventParams,
- ) error {
- partWriter, err := m.CreatePart(h)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to create part %q, err: %v", part.Name, err)
- return err
- }
- if part.Body != "" {
- _, err = partWriter.Write([]byte(replaceWithReplacer(part.Body, replacer)))
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to write part %q, err: %v", part.Name, err)
- return err
- }
- return nil
- }
- if part.Filepath == dataprovider.RetentionReportPlaceHolder {
- data, err := params.getCompressedDataRetentionReport()
- if err != nil {
- return err
- }
- _, err = partWriter.Write(data)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to write part %q, err: %v", part.Name, err)
- return err
- }
- return nil
- }
- err = writeFileContent(conn, util.CleanPath(replacer.Replace(part.Filepath)), partWriter)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to write file part %q, err: %v", part.Name, err)
- return err
- }
- return nil
- }
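- // getHTTPRuleActionBody builds the request body: none for GET, the configured body or retention report otherwise, or a streamed multipart body when parts are defined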
- func getHTTPRuleActionBody(c dataprovider.EventActionHTTPConfig, replacer *strings.Replacer,
- cancel context.CancelFunc, user dataprovider.User, params *EventParams,
- ) (io.ReadCloser, string, error) {
- var body io.ReadCloser
- if c.Method == http.MethodGet {
- return body, "", nil
- }
- if c.Body != "" {
- if c.Body == dataprovider.RetentionReportPlaceHolder {
- data, err := params.getCompressedDataRetentionReport()
- if err != nil {
- return body, "", err
- }
- return io.NopCloser(bytes.NewBuffer(data)), "", nil
- }
- return io.NopCloser(bytes.NewBufferString(replaceWithReplacer(c.Body, replacer))), "", nil
- }
- if len(c.Parts) > 0 {
- r, w := io.Pipe()
- m := multipart.NewWriter(w)
- var conn *BaseConnection
- if user.Username != "" {
- var err error
- user, err = getUserForEventAction(user)
- if err != nil {
- return body, "", err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- if err != nil {
- user.CloseFs() //nolint:errcheck
- return body, "", fmt.Errorf("error getting multipart file/s, unable to check root fs for user %q: %w",
- user.Username, err)
- }
- conn = NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- }
- go func() {
- defer w.Close()
- defer user.CloseFs() //nolint:errcheck
- for _, part := range c.Parts {
- h := make(textproto.MIMEHeader)
- if part.Body != "" {
- h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"`, multipartQuoteEscaper.Replace(part.Name)))
- } else {
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- multipartQuoteEscaper.Replace(part.Name), multipartQuoteEscaper.Replace(path.Base(part.Filepath))))
- contentType := mime.TypeByExtension(path.Ext(part.Filepath))
- if contentType == "" {
- contentType = "application/octet-stream"
- }
- h.Set("Content-Type", contentType)
- }
- for _, keyVal := range part.Headers {
- h.Set(keyVal.Key, replaceWithReplacer(keyVal.Value, replacer))
- }
- if err := writeHTTPPart(m, part, h, conn, replacer, params); err != nil {
- cancel()
- return
- }
- }
- m.Close()
- }()
- return r, m.FormDataContentType(), nil
- }
- return body, "", nil
- }
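- // executeHTTPRuleAction sends the configured HTTP notification, expanding placeholders in the endpoint, headers, credentials and body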
- func executeHTTPRuleAction(c dataprovider.EventActionHTTPConfig, params *EventParams) error {
- if err := c.TryDecryptPassword(); err != nil {
- return err
- }
- addObjectData := false
- if params.Object != nil {
- addObjectData = c.HasObjectData()
- }
- replacements := params.getStringReplacements(addObjectData)
- replacer := strings.NewReplacer(replacements...)
- endpoint, err := getHTTPRuleActionEndpoint(c, replacer)
- if err != nil {
- return err
- }
- ctx, cancel := c.GetContext()
- defer cancel()
- var user dataprovider.User
- if c.HasMultipartFiles() {
- user, err = params.getUserFromSender()
- if err != nil {
- return err
- }
- }
- body, contentType, err := getHTTPRuleActionBody(c, replacer, cancel, user, params)
- if err != nil {
- return err
- }
- if body != nil {
- defer body.Close()
- }
- req, err := http.NewRequestWithContext(ctx, c.Method, endpoint, body)
- if err != nil {
- return err
- }
- if contentType != "" {
- req.Header.Set("Content-Type", contentType)
- }
- if c.Username != "" {
- req.SetBasicAuth(replaceWithReplacer(c.Username, replacer), c.Password.GetPayload())
- }
- for _, keyVal := range c.Headers {
- req.Header.Set(keyVal.Key, replaceWithReplacer(keyVal.Value, replacer))
- }
- client := c.GetHTTPClient()
- defer client.CloseIdleConnections()
- startTime := time.Now()
- resp, err := client.Do(req)
- if err != nil {
- eventManagerLog(logger.LevelDebug, "unable to send http notification, endpoint: %s, elapsed: %s, err: %v",
- endpoint, time.Since(startTime), err)
- return fmt.Errorf("error sending HTTP request: %w", err)
- }
- defer resp.Body.Close()
- eventManagerLog(logger.LevelDebug, "http notification sent, endpoint: %s, elapsed: %s, status code: %d",
- endpoint, time.Since(startTime), resp.StatusCode)
- if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusNoContent {
- return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
- }
- return nil
- }
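- // executeCommandRuleAction runs the configured command with the expanded arguments and environment variables within the configured timeout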
- func executeCommandRuleAction(c dataprovider.EventActionCommandConfig, params *EventParams) error {
- addObjectData := false
- if params.Object != nil {
- for _, k := range c.EnvVars {
- if strings.Contains(k.Value, "{{ObjectData}}") {
- addObjectData = true
- break
- }
- }
- }
- replacements := params.getStringReplacements(addObjectData)
- replacer := strings.NewReplacer(replacements...)
- args := make([]string, 0, len(c.Args))
- for _, arg := range c.Args {
- args = append(args, replaceWithReplacer(arg, replacer))
- }
- ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)*time.Second)
- defer cancel()
- cmd := exec.CommandContext(ctx, c.Cmd, args...)
- cmd.Env = []string{}
- for _, keyVal := range c.EnvVars {
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", keyVal.Key, replaceWithReplacer(keyVal.Value, replacer)))
- }
- startTime := time.Now()
- err := cmd.Run()
- eventManagerLog(logger.LevelDebug, "executed command %q, elapsed: %s, error: %v",
- c.Cmd, time.Since(startTime), err)
- return err
- }
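- // executeEmailRuleAction sends the configured email notification, expanding placeholders and resolving attachments, including the retention report placeholder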
- func executeEmailRuleAction(c dataprovider.EventActionEmailConfig, params *EventParams) error {
- addObjectData := false
- if params.Object != nil {
- if strings.Contains(c.Body, "{{ObjectData}}") {
- addObjectData = true
- }
- }
- replacements := params.getStringReplacements(addObjectData)
- replacer := strings.NewReplacer(replacements...)
- body := replaceWithReplacer(c.Body, replacer)
- subject := replaceWithReplacer(c.Subject, replacer)
- startTime := time.Now()
- var files []mail.File
- fileAttachments := make([]string, 0, len(c.Attachments))
- for _, attachment := range c.Attachments {
- if attachment == dataprovider.RetentionReportPlaceHolder {
- f, err := params.getRetentionReportsAsMailAttachment()
- if err != nil {
- return err
- }
- files = append(files, f)
- continue
- }
- fileAttachments = append(fileAttachments, attachment)
- }
- if len(fileAttachments) > 0 {
- user, err := params.getUserFromSender()
- if err != nil {
- return err
- }
- res, err := getMailAttachments(user, fileAttachments, replacer)
- if err != nil {
- return err
- }
- files = append(files, res...)
- }
- err := smtp.SendEmail(c.Recipients, subject, body, smtp.EmailContentTypeTextPlain, files...)
- eventManagerLog(logger.LevelDebug, "executed email notification action, elapsed: %s, error: %v",
- time.Since(startTime), err)
- if err != nil {
- return fmt.Errorf("unable to send email: %w", err)
- }
- return nil
- }
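- // getUserForEventAction returns a copy of the user with group settings applied and bandwidth/transfer limits, file patterns and permission restrictions removed so event actions are not blocked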
- func getUserForEventAction(user dataprovider.User) (dataprovider.User, error) {
- err := user.LoadAndApplyGroupSettings()
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to get group for user %q: %+v", user.Username, err)
- return dataprovider.User{}, fmt.Errorf("unable to get groups for user %q", user.Username)
- }
- user.UploadDataTransfer = 0
- user.UploadBandwidth = 0
- user.DownloadBandwidth = 0
- user.Filters.DisableFsChecks = false
- user.Filters.FilePatterns = nil
- user.Filters.BandwidthLimits = nil
- user.Filters.DataTransferLimits = nil
- for k := range user.Permissions {
- user.Permissions[k] = []string{dataprovider.PermAny}
- }
- return user, nil
- }
- func replacePathsPlaceholders(paths []string, replacer *strings.Replacer) []string {
- results := make([]string, 0, len(paths))
- for _, p := range paths {
- results = append(results, util.CleanPath(replaceWithReplacer(p, replacer)))
- }
- return util.RemoveDuplicates(results, false)
- }
- func executeDeleteFileFsAction(conn *BaseConnection, item string, info os.FileInfo) error {
- fs, fsPath, err := conn.GetFsAndResolvedPath(item)
- if err != nil {
- return err
- }
- return conn.RemoveFile(fs, fsPath, item, info)
- }
- func executeDeleteFsActionForUser(deletes []string, replacer *strings.Replacer, user dataprovider.User) error {
- user, err := getUserForEventAction(user)
- if err != nil {
- return err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return fmt.Errorf("delete error, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- for _, item := range replacePathsPlaceholders(deletes, replacer) {
- info, err := conn.DoStat(item, 0, false)
- if err != nil {
- if conn.IsNotExistError(err) {
- continue
- }
- return fmt.Errorf("unable to check item to delete %q, user %q: %w", item, user.Username, err)
- }
- if info.IsDir() {
- if err = conn.RemoveDir(item); err != nil {
- return fmt.Errorf("unable to remove dir %q, user %q: %w", item, user.Username, err)
- }
- } else {
- if err = executeDeleteFileFsAction(conn, item, info); err != nil {
- return fmt.Errorf("unable to remove file %q, user %q: %w", item, user.Username, err)
- }
- }
- eventManagerLog(logger.LevelDebug, "item %q removed for user %q", item, user.Username)
- }
- return nil
- }
- func executeDeleteFsRuleAction(deletes []string, replacer *strings.Replacer,
- conditions dataprovider.ConditionOptions, params *EventParams,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping fs delete for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping fs delete for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeDeleteFsActionForUser(deletes, replacer, user); err != nil {
- params.AddError(err)
- failures = append(failures, user.Username)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("fs delete failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no delete executed")
- return errors.New("no delete executed")
- }
- return nil
- }
- func executeMkDirsFsActionForUser(dirs []string, replacer *strings.Replacer, user dataprovider.User) error {
- user, err := getUserForEventAction(user)
- if err != nil {
- return err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return fmt.Errorf("mkdir error, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- for _, item := range replacePathsPlaceholders(dirs, replacer) {
- if err = conn.CheckParentDirs(path.Dir(item)); err != nil {
- return fmt.Errorf("unable to check parent dirs for %q, user %q: %w", item, user.Username, err)
- }
- if err = conn.createDirIfMissing(item); err != nil {
- return fmt.Errorf("unable to create dir %q, user %q: %w", item, user.Username, err)
- }
- eventManagerLog(logger.LevelDebug, "directory %q created for user %q", item, user.Username)
- }
- return nil
- }
- func executeMkdirFsRuleAction(dirs []string, replacer *strings.Replacer,
- conditions dataprovider.ConditionOptions, params *EventParams,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping fs mkdir for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping fs mkdir for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeMkDirsFsActionForUser(dirs, replacer, user); err != nil {
- failures = append(failures, user.Username)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("fs mkdir failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no mkdir executed")
- return errors.New("no mkdir executed")
- }
- return nil
- }
- func executeRenameFsActionForUser(renames []dataprovider.KeyValue, replacer *strings.Replacer,
- user dataprovider.User,
- ) error {
- user, err := getUserForEventAction(user)
- if err != nil {
- return err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return fmt.Errorf("rename error, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- for _, item := range renames {
- source := util.CleanPath(replaceWithReplacer(item.Key, replacer))
- target := util.CleanPath(replaceWithReplacer(item.Value, replacer))
- if err = conn.Rename(source, target); err != nil {
- return fmt.Errorf("unable to rename %q->%q, user %q: %w", source, target, user.Username, err)
- }
- eventManagerLog(logger.LevelDebug, "rename %q->%q ok, user %q", source, target, user.Username)
- }
- return nil
- }
- func executeExistFsActionForUser(exist []string, replacer *strings.Replacer,
- user dataprovider.User,
- ) error {
- user, err := getUserForEventAction(user)
- if err != nil {
- return err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return fmt.Errorf("existence check error, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- for _, item := range replacePathsPlaceholders(exist, replacer) {
- if _, err = conn.DoStat(item, 0, false); err != nil {
- return fmt.Errorf("error checking existence for path %q, user %q: %w", item, user.Username, err)
- }
- eventManagerLog(logger.LevelDebug, "path %q exists for user %q", item, user.Username)
- }
- return nil
- }
- func executeRenameFsRuleAction(renames []dataprovider.KeyValue, replacer *strings.Replacer,
- conditions dataprovider.ConditionOptions, params *EventParams,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping fs rename for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping fs rename for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeRenameFsActionForUser(renames, replacer, user); err != nil {
- failures = append(failures, user.Username)
- params.AddError(err)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("fs rename failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no rename executed")
- return errors.New("no rename executed")
- }
- return nil
- }
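- // getArchiveBaseDir returns the common parent directory of the given paths, or "/" if they have different parents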
- func getArchiveBaseDir(paths []string) string {
- var parentDirs []string
- for _, p := range paths {
- parentDirs = append(parentDirs, path.Dir(p))
- }
- parentDirs = util.RemoveDuplicates(parentDirs, false)
- baseDir := "/"
- if len(parentDirs) == 1 {
- baseDir = parentDirs[0]
- }
- return baseDir
- }
- func executeCompressFsActionForUser(c dataprovider.EventActionFsCompress, replacer *strings.Replacer,
- user dataprovider.User,
- ) error {
- user, err := getUserForEventAction(user)
- if err != nil {
- return err
- }
- connectionID := fmt.Sprintf("%s_%s", protocolEventAction, xid.New().String())
- err = user.CheckFsRoot(connectionID)
- defer user.CloseFs() //nolint:errcheck
- if err != nil {
- return fmt.Errorf("compress error, unable to check root fs for user %q: %w", user.Username, err)
- }
- conn := NewBaseConnection(connectionID, protocolEventAction, "", "", user)
- name := util.CleanPath(replaceWithReplacer(c.Name, replacer))
- paths := make([]string, 0, len(c.Paths))
- for idx := range c.Paths {
- p := util.CleanPath(replaceWithReplacer(c.Paths[idx], replacer))
- if p == name {
- return fmt.Errorf("cannot compress the archive to create: %q", name)
- }
- paths = append(paths, p)
- }
- writer, numFiles, truncatedSize, cancelFn, err := getFileWriter(conn, name)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to create archive %q: %v", name, err)
- return fmt.Errorf("unable to create archive: %w", err)
- }
- defer cancelFn()
- paths = util.RemoveDuplicates(paths, false)
- baseDir := getArchiveBaseDir(paths)
- eventManagerLog(logger.LevelDebug, "creating archive %q for paths %+v", name, paths)
- zipWriter := &zipWriterWrapper{
- Name: name,
- Writer: zip.NewWriter(writer),
- Entries: make(map[string]bool),
- }
- for _, item := range paths {
- if err := addZipEntry(zipWriter, conn, item, baseDir); err != nil {
- closeWriterAndUpdateQuota(writer, conn, name, numFiles, truncatedSize, err) //nolint:errcheck
- return err
- }
- }
- if err := zipWriter.Writer.Close(); err != nil {
- eventManagerLog(logger.LevelError, "unable to close zip file %q: %v", name, err)
- closeWriterAndUpdateQuota(writer, conn, name, numFiles, truncatedSize, err) //nolint:errcheck
- return fmt.Errorf("unable to close zip file %q: %w", name, err)
- }
- return closeWriterAndUpdateQuota(writer, conn, name, numFiles, truncatedSize, err)
- }
- func executeExistFsRuleAction(exist []string, replacer *strings.Replacer, conditions dataprovider.ConditionOptions,
- params *EventParams,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping fs exist for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping fs exist for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeExistFsActionForUser(exist, replacer, user); err != nil {
- failures = append(failures, user.Username)
- params.AddError(err)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("fs existence check failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no existence check executed")
- return errors.New("no existence check executed")
- }
- return nil
- }
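- // executeCompressFsRuleAction creates the configured archive for every user matching
- // the rule conditions and fails if any compression fails or no user matches.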
- func executeCompressFsRuleAction(c dataprovider.EventActionFsCompress, replacer *strings.Replacer,
- conditions dataprovider.ConditionOptions, params *EventParams,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping fs compress for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping fs compress for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeCompressFsActionForUser(c, replacer, user); err != nil {
- failures = append(failures, user.Username)
- params.AddError(err)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("fs compress failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no file/folder compressed")
- return errors.New("no file/folder compressed")
- }
- return nil
- }
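- // executeFsRuleAction builds the placeholder replacer from the event parameters and
- // dispatches the filesystem action (rename, delete, mkdirs, exist, compress) to the
- // matching handler.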
- func executeFsRuleAction(c dataprovider.EventActionFilesystemConfig, conditions dataprovider.ConditionOptions,
- params *EventParams,
- ) error {
- addObjectData := false
- replacements := params.getStringReplacements(addObjectData)
- replacer := strings.NewReplacer(replacements...)
- switch c.Type {
- case dataprovider.FilesystemActionRename:
- return executeRenameFsRuleAction(c.Renames, replacer, conditions, params)
- case dataprovider.FilesystemActionDelete:
- return executeDeleteFsRuleAction(c.Deletes, replacer, conditions, params)
- case dataprovider.FilesystemActionMkdirs:
- return executeMkdirFsRuleAction(c.MkDirs, replacer, conditions, params)
- case dataprovider.FilesystemActionExist:
- return executeExistFsRuleAction(c.Exist, replacer, conditions, params)
- case dataprovider.FilesystemActionCompress:
- return executeCompressFsRuleAction(c.Compress, replacer, conditions, params)
- default:
- return fmt.Errorf("unsupported filesystem action %d", c.Type)
- }
- }
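- // executeQuotaResetForUser rescans and updates the quota for a single user. Only one
- // quota scan per user can run at a time.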
- func executeQuotaResetForUser(user dataprovider.User) error {
- if err := user.LoadAndApplyGroupSettings(); err != nil {
- eventManagerLog(logger.LevelDebug, "skipping scheduled quota reset for user %s, cannot apply group settings: %v",
- user.Username, err)
- return err
- }
- if !QuotaScans.AddUserQuotaScan(user.Username) {
- eventManagerLog(logger.LevelError, "another quota scan is already in progress for user %q", user.Username)
- return fmt.Errorf("another quota scan is in progress for user %q", user.Username)
- }
- defer QuotaScans.RemoveUserQuotaScan(user.Username)
- numFiles, size, err := user.ScanQuota()
- if err != nil {
- eventManagerLog(logger.LevelError, "error scanning quota for user %q: %v", user.Username, err)
- return fmt.Errorf("error scanning quota for user %q: %w", user.Username, err)
- }
- err = dataprovider.UpdateUserQuota(&user, numFiles, size, true)
- if err != nil {
- eventManagerLog(logger.LevelError, "error updating quota for user %q: %v", user.Username, err)
- return fmt.Errorf("error updating quota for user %q: %w", user.Username, err)
- }
- return nil
- }
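- // executeUsersQuotaResetRuleAction resets the quota for every user matching the rule
- // conditions and fails if any reset fails or no user matches.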
- func executeUsersQuotaResetRuleAction(conditions dataprovider.ConditionOptions, params *EventParams) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failedResets []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping quota reset for user %q, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping quota reset for user %q, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeQuotaResetForUser(user); err != nil {
- params.AddError(err)
- failedResets = append(failedResets, user.Username)
- continue
- }
- }
- if len(failedResets) > 0 {
- return fmt.Errorf("quota reset failed for users: %+v", failedResets)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no user quota reset executed")
- return errors.New("no user quota reset executed")
- }
- return nil
- }
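- // executeFoldersQuotaResetRuleAction rescans and updates the quota for every virtual
- // folder matching the rule conditions.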
- func executeFoldersQuotaResetRuleAction(conditions dataprovider.ConditionOptions, params *EventParams) error {
- folders, err := params.getFolders()
- if err != nil {
- return fmt.Errorf("unable to get folders: %w", err)
- }
- var failedResets []string
- executed := 0
- for _, folder := range folders {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" && !checkEventConditionPatterns(folder.Name, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping scheduled quota reset for folder %s, name conditions don't match",
- folder.Name)
- continue
- }
- if !QuotaScans.AddVFolderQuotaScan(folder.Name) {
- eventManagerLog(logger.LevelError, "another quota scan is already in progress for folder %q", folder.Name)
- params.AddError(fmt.Errorf("another quota scan is already in progress for folder %q", folder.Name))
- failedResets = append(failedResets, folder.Name)
- continue
- }
- executed++
- f := vfs.VirtualFolder{
- BaseVirtualFolder: folder,
- VirtualPath: "/",
- }
- numFiles, size, err := f.ScanQuota()
- QuotaScans.RemoveVFolderQuotaScan(folder.Name)
- if err != nil {
- eventManagerLog(logger.LevelError, "error scanning quota for folder %q: %v", folder.Name, err)
- params.AddError(fmt.Errorf("error scanning quota for folder %q: %w", folder.Name, err))
- failedResets = append(failedResets, folder.Name)
- continue
- }
- err = dataprovider.UpdateVirtualFolderQuota(&folder, numFiles, size, true)
- if err != nil {
- eventManagerLog(logger.LevelError, "error updating quota for folder %q: %v", folder.Name, err)
- params.AddError(fmt.Errorf("error updating quota for folder %q: %w", folder.Name, err))
- failedResets = append(failedResets, folder.Name)
- }
- }
- if len(failedResets) > 0 {
- return fmt.Errorf("quota reset failed for folders: %+v", failedResets)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no folder quota reset executed")
- return errors.New("no folder quota reset executed")
- }
- return nil
- }
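- // executeTransferQuotaResetRuleAction resets the used upload and download transfer
- // quota to zero for every user matching the rule conditions.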
- func executeTransferQuotaResetRuleAction(conditions dataprovider.ConditionOptions, params *EventParams) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failedResets []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping scheduled transfer quota reset for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping scheduled transfer quota reset for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- err = dataprovider.UpdateUserTransferQuota(&user, 0, 0, true)
- if err != nil {
- eventManagerLog(logger.LevelError, "error updating transfer quota for user %q: %v", user.Username, err)
- params.AddError(fmt.Errorf("error updating transfer quota for user %q: %w", user.Username, err))
- failedResets = append(failedResets, user.Username)
- }
- }
- if len(failedResets) > 0 {
- return fmt.Errorf("transfer quota reset failed for users: %+v", failedResets)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no transfer quota reset executed")
- return errors.New("no transfer quota reset executed")
- }
- return nil
- }
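- // executeDataRetentionCheckForUser starts a retention check for the given user and
- // records the result in the event parameters. Only one retention check per user can
- // run at a time.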
- func executeDataRetentionCheckForUser(user dataprovider.User, folders []dataprovider.FolderRetention,
- params *EventParams, actionName string,
- ) error {
- if err := user.LoadAndApplyGroupSettings(); err != nil {
- eventManagerLog(logger.LevelDebug, "skipping scheduled retention check for user %s, cannot apply group settings: %v",
- user.Username, err)
- return err
- }
- check := RetentionCheck{
- Folders: folders,
- }
- c := RetentionChecks.Add(check, &user)
- if c == nil {
- eventManagerLog(logger.LevelError, "another retention check is already in progress for user %q", user.Username)
- return fmt.Errorf("another retention check is in progress for user %q", user.Username)
- }
- defer func() {
- params.retentionChecks = append(params.retentionChecks, executedRetentionCheck{
- Username: user.Username,
- ActionName: actionName,
- Results: c.results,
- })
- }()
- if err := c.Start(); err != nil {
- eventManagerLog(logger.LevelError, "error checking retention for user %q: %v", user.Username, err)
- return fmt.Errorf("error checking retention for user %q: %w", user.Username, err)
- }
- return nil
- }
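- // executeDataRetentionCheckRuleAction runs the configured retention check for every
- // user matching the rule conditions.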
- func executeDataRetentionCheckRuleAction(config dataprovider.EventActionDataRetentionConfig,
- conditions dataprovider.ConditionOptions, params *EventParams, actionName string,
- ) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failedChecks []string
- executed := 0
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping scheduled retention check for user %s, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping scheduled retention check for user %s, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeDataRetentionCheckForUser(user, config.Folders, params, actionName); err != nil {
- failedChecks = append(failedChecks, user.Username)
- params.AddError(err)
- continue
- }
- }
- if len(failedChecks) > 0 {
- return fmt.Errorf("retention check failed for users: %+v", failedChecks)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no retention check executed")
- return errors.New("no retention check executed")
- }
- return nil
- }
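- // executeMetadataCheckForUser checks the metadata consistency for a single user. Only
- // one metadata check per user can run at a time.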
- func executeMetadataCheckForUser(user dataprovider.User) error {
- if err := user.LoadAndApplyGroupSettings(); err != nil {
- eventManagerLog(logger.LevelDebug, "skipping scheduled quota reset for user %s, cannot apply group settings: %v",
- user.Username, err)
- return err
- }
- if !ActiveMetadataChecks.Add(user.Username) {
- eventManagerLog(logger.LevelError, "another metadata check is already in progress for user %q", user.Username)
- return fmt.Errorf("another metadata check is in progress for user %q", user.Username)
- }
- defer ActiveMetadataChecks.Remove(user.Username)
- if err := user.CheckMetadataConsistency(); err != nil {
- eventManagerLog(logger.LevelError, "error checking metadata consistence for user %q: %v", user.Username, err)
- return fmt.Errorf("error checking metadata consistence for user %q: %w", user.Username, err)
- }
- return nil
- }
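- // executeMetadataCheckRuleAction runs the metadata consistency check for every user
- // matching the rule conditions.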
- func executeMetadataCheckRuleAction(conditions dataprovider.ConditionOptions, params *EventParams) error {
- users, err := params.getUsers()
- if err != nil {
- return fmt.Errorf("unable to get users: %w", err)
- }
- var failures []string
- var executed int
- for _, user := range users {
- // if sender is set, the conditions have already been evaluated
- if params.sender == "" {
- if !checkEventConditionPatterns(user.Username, conditions.Names) {
- eventManagerLog(logger.LevelDebug, "skipping metadata check for user %q, name conditions don't match",
- user.Username)
- continue
- }
- if !checkEventGroupConditionPatters(user.Groups, conditions.GroupNames) {
- eventManagerLog(logger.LevelDebug, "skipping metadata check for user %q, group name conditions don't match",
- user.Username)
- continue
- }
- }
- executed++
- if err = executeMetadataCheckForUser(user); err != nil {
- params.AddError(err)
- failures = append(failures, user.Username)
- continue
- }
- }
- if len(failures) > 0 {
- return fmt.Errorf("metadata check failed for users: %+v", failures)
- }
- if executed == 0 {
- eventManagerLog(logger.LevelError, "no metadata check executed")
- return errors.New("no metadata check executed")
- }
- return nil
- }
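- // executeRuleAction executes a single event action based on its type. Any error is
- // wrapped with the action name and recorded in the event parameters before being
- // returned. The rule conditions are passed along because per-user and per-folder
- // actions re-evaluate the name and group patterns themselves.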
- func executeRuleAction(action dataprovider.BaseEventAction, params *EventParams,
- conditions dataprovider.ConditionOptions,
- ) error {
- var err error
- switch action.Type {
- case dataprovider.ActionTypeHTTP:
- err = executeHTTPRuleAction(action.Options.HTTPConfig, params)
- case dataprovider.ActionTypeCommand:
- err = executeCommandRuleAction(action.Options.CmdConfig, params)
- case dataprovider.ActionTypeEmail:
- err = executeEmailRuleAction(action.Options.EmailConfig, params)
- case dataprovider.ActionTypeBackup:
- var backupPath string
- backupPath, err = dataprovider.ExecuteBackup()
- if err == nil {
- params.setBackupParams(backupPath)
- }
- case dataprovider.ActionTypeUserQuotaReset:
- err = executeUsersQuotaResetRuleAction(conditions, params)
- case dataprovider.ActionTypeFolderQuotaReset:
- err = executeFoldersQuotaResetRuleAction(conditions, params)
- case dataprovider.ActionTypeTransferQuotaReset:
- err = executeTransferQuotaResetRuleAction(conditions, params)
- case dataprovider.ActionTypeDataRetentionCheck:
- err = executeDataRetentionCheckRuleAction(action.Options.RetentionConfig, conditions, params, action.Name)
- case dataprovider.ActionTypeMetadataCheck:
- err = executeMetadataCheckRuleAction(conditions, params)
- case dataprovider.ActionTypeFilesystem:
- err = executeFsRuleAction(action.Options.FsConfig, conditions, params)
- default:
- err = fmt.Errorf("unsupported action type: %d", action.Type)
- }
- if err != nil {
- err = fmt.Errorf("action %q failed: %w", action.Name, err)
- }
- params.AddError(err)
- return err
- }
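- // executeSyncRulesActions executes the synchronous, non-failure actions of the given
- // rules, then runs the remaining asynchronous actions, including any failure actions,
- // in a separate goroutine. It returns the error from the last failed synchronous action.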
- func executeSyncRulesActions(rules []dataprovider.EventRule, params EventParams) error {
- var errRes error
- for _, rule := range rules {
- var failedActions []string
- paramsCopy := params.getACopy()
- for _, action := range rule.Actions {
- if !action.Options.IsFailureAction && action.Options.ExecuteSync {
- startTime := time.Now()
- if err := executeRuleAction(action.BaseEventAction, paramsCopy, rule.Conditions.Options); err != nil {
- eventManagerLog(logger.LevelError, "unable to execute sync action %q for rule %q, elapsed %s, err: %v",
- action.Name, rule.Name, time.Since(startTime), err)
- failedActions = append(failedActions, action.Name)
- // return only the last error; this is acceptable for now
- errRes = err
- if action.Options.StopOnFailure {
- break
- }
- } else {
- eventManagerLog(logger.LevelDebug, "executed sync action %q for rule %q, elapsed: %s",
- action.Name, rule.Name, time.Since(startTime))
- }
- }
- }
- // execute async actions if any, including failure actions
- go executeRuleAsyncActions(rule, paramsCopy, failedActions)
- }
- return errRes
- }
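- // executeAsyncRulesActions executes the asynchronous actions for the given rules after
- // registering the work with the event manager's async task counter.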
- func executeAsyncRulesActions(rules []dataprovider.EventRule, params EventParams) {
- eventManager.addAsyncTask()
- defer eventManager.removeAsyncTask()
- for _, rule := range rules {
- executeRuleAsyncActions(rule, params.getACopy(), nil)
- }
- }
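- // executeRuleAsyncActions executes the asynchronous, non-failure actions of a rule and,
- // if any action failed, including the synchronous ones passed in failedActions, runs
- // the rule's failure actions.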
- func executeRuleAsyncActions(rule dataprovider.EventRule, params *EventParams, failedActions []string) {
- for _, action := range rule.Actions {
- if !action.Options.IsFailureAction && !action.Options.ExecuteSync {
- startTime := time.Now()
- if err := executeRuleAction(action.BaseEventAction, params, rule.Conditions.Options); err != nil {
- eventManagerLog(logger.LevelError, "unable to execute action %q for rule %q, elapsed %s, err: %v",
- action.Name, rule.Name, time.Since(startTime), err)
- failedActions = append(failedActions, action.Name)
- if action.Options.StopOnFailure {
- break
- }
- } else {
- eventManagerLog(logger.LevelDebug, "executed action %q for rule %q, elapsed %s",
- action.Name, rule.Name, time.Since(startTime))
- }
- }
- }
- if len(failedActions) > 0 {
- params.updateStatusFromError = false
- // execute failure actions
- for _, action := range rule.Actions {
- if action.Options.IsFailureAction {
- startTime := time.Now()
- if err := executeRuleAction(action.BaseEventAction, params, rule.Conditions.Options); err != nil {
- eventManagerLog(logger.LevelError, "unable to execute failure action %q for rule %q, elapsed %s, err: %v",
- action.Name, rule.Name, time.Since(startTime), err)
- if action.Options.StopOnFailure {
- break
- }
- } else {
- eventManagerLog(logger.LevelDebug, "executed failure action %q for rule %q, elapsed: %s",
- action.Name, rule.Name, time.Since(startTime))
- }
- }
- }
- }
- }
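- // eventCronJob is the scheduler job that executes the event rule identified by ruleName.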
- type eventCronJob struct {
- ruleName string
- }
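- // getTask returns the task associated with the rule when concurrent executions must be
- // guarded against, creating it if it does not exist yet. Rules without this protection
- // get an empty task.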
- func (j *eventCronJob) getTask(rule dataprovider.EventRule) (dataprovider.Task, error) {
- if rule.GuardFromConcurrentExecution() {
- task, err := dataprovider.GetTaskByName(rule.Name)
- if _, ok := err.(*util.RecordNotFoundError); ok {
- eventManagerLog(logger.LevelDebug, "adding task for rule %q", rule.Name)
- task = dataprovider.Task{
- Name: rule.Name,
- UpdateAt: 0,
- Version: 0,
- }
- err = dataprovider.AddTask(rule.Name)
- if err != nil {
- eventManagerLog(logger.LevelWarn, "unable to add task for rule %q: %v", rule.Name, err)
- return task, err
- }
- } else {
- eventManagerLog(logger.LevelWarn, "unable to get task for rule %q: %v", rule.Name, err)
- }
- return task, err
- }
- return dataprovider.Task{}, nil
- }
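- // Run executes the scheduled rule. If the rule is guarded against concurrent executions,
- // the shared task timestamp is checked and kept up to date while the actions run, so
- // that overlapping executions, for example from other instances sharing the same data
- // provider, are skipped.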
- func (j *eventCronJob) Run() {
- eventManagerLog(logger.LevelDebug, "executing scheduled rule %q", j.ruleName)
- rule, err := dataprovider.EventRuleExists(j.ruleName)
- if err != nil {
- eventManagerLog(logger.LevelError, "unable to load rule with name %q", j.ruleName)
- return
- }
- if err = rule.CheckActionsConsistency(""); err != nil {
- eventManagerLog(logger.LevelWarn, "scheduled rule %q skipped: %v", rule.Name, err)
- return
- }
- task, err := j.getTask(rule)
- if err != nil {
- return
- }
- if task.Name != "" {
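- // skip the execution if the task timestamp was refreshed too recently, otherwise
- // claim the task by updating it at its current version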
- updateInterval := 5 * time.Minute
- updatedAt := util.GetTimeFromMsecSinceEpoch(task.UpdateAt)
- if updatedAt.Add(updateInterval*2 + 1).After(time.Now()) {
- eventManagerLog(logger.LevelDebug, "task for rule %q too recent: %s, skip execution", rule.Name, updatedAt)
- return
- }
- err = dataprovider.UpdateTask(rule.Name, task.Version)
- if err != nil {
- eventManagerLog(logger.LevelInfo, "unable to update task timestamp for rule %q, skip execution, err: %v",
- rule.Name, err)
- return
- }
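- // refresh the task timestamp in the background while the actions run, so this
- // execution is not considered stale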
- ticker := time.NewTicker(updateInterval)
- done := make(chan bool)
- defer func() {
- done <- true
- ticker.Stop()
- }()
- go func(taskName string) {
- eventManagerLog(logger.LevelDebug, "update task %q timestamp worker started", taskName)
- for {
- select {
- case <-done:
- eventManagerLog(logger.LevelDebug, "update task %q timestamp worker finished", taskName)
- return
- case <-ticker.C:
- err := dataprovider.UpdateTaskTimestamp(taskName)
- eventManagerLog(logger.LevelInfo, "updated timestamp for task %q, err: %v", taskName, err)
- }
- }
- }(task.Name)
- executeAsyncRulesActions([]dataprovider.EventRule{rule}, EventParams{Status: 1, updateStatusFromError: true})
- } else {
- executeAsyncRulesActions([]dataprovider.EventRule{rule}, EventParams{Status: 1, updateStatusFromError: true})
- }
- eventManagerLog(logger.LevelDebug, "execution for scheduled rule %q finished", j.ruleName)
- }
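- // zipWriterWrapper groups a zip.Writer with the archive name and the names of the
- // entries added so far.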
- type zipWriterWrapper struct {
- Name string
- Entries map[string]bool
- Writer *zip.Writer
- }
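- // eventManagerLog logs to the main logger using the "eventmanager" sender and an empty
- // connection ID.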
- func eventManagerLog(level logger.LogLevel, format string, v ...any) {
- logger.Log(level, "eventmanager", "", format, v...)
- }