
Add Organizational event logging feature

This PR adds event/audit logging support for organizations.
The feature is disabled by default, since it logs a lot of data and adds
extra database transactions.

All event types are covered except a few that belong to features we do not
support (yet), such as SSO.

This feature has been tested with multiple clients and all database types.
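For reference, the handler changes in this diff all funnel through a small logging helper. Below is a minimal sketch of its shape, inferred purely from the `log_event(...)` call sites in the diff; the real helper is re-exported from `crate::api::core` (presumably defined in the new `src/api/core/events.rs`) and may differ, and `DbConn`/`CONFIG`/`Event` are the existing Vaultwarden types:

```rust
// Hedged sketch only, reconstructed from the call sites in this diff.
use std::net::IpAddr;

pub async fn log_event(
    event_type: i32,        // an EventType variant cast to its numeric code
    source_uuid: &str,      // uuid of the cipher/org-user/etc. the event refers to
    org_uuid: String,       // organization the event belongs to
    act_user_uuid: String,  // acting user (ACTING_ADMIN_USER for admin-panel actions)
    device_type: i32,       // client device type, 14 = UnknownBrowser
    ip: &IpAddr,            // stored in the ip_address column
    conn: &mut DbConn,
) {
    if !CONFIG.org_events_enabled() {
        return; // the feature is opt-in, so skip the extra database write entirely
    }
    // Build an Event row from the arguments and persist it.
}
```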

Fixes #229
BlackDex · 3 years ago
commit 2ea9b66943

+ 24 - 3
.env.template

@@ -1,13 +1,14 @@
+# shellcheck disable=SC2034,SC2148
 ## Vaultwarden Configuration File
 ## Uncomment any of the following lines to change the defaults
 ##
 ## Be aware that most of these settings will be overridden if they were changed
 ## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
 ##
-## By default, vaultwarden expects for this file to be named ".env" and located
+## By default, Vaultwarden expects for this file to be named ".env" and located
 ## in the current working directory. If this is not the case, the environment
 ## variable ENV_FILE can be set to the location of this file prior to starting
-## vaultwarden.
+## Vaultwarden.

 ## Main data folder
 # DATA_FOLDER=data
@@ -80,11 +81,27 @@
 ## This setting applies globally to all users.
 # EMERGENCY_ACCESS_ALLOWED=true

+## Controls whether event logging is enabled for organizations
+## This setting applies to organizations.
+## This is disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
+# ORG_EVENTS_ENABLED=false
+
+## Number of days to retain events stored in the database.
+## If unset (the default), events are kept indefinitely and the scheduled cleanup job is disabled!
+# EVENTS_DAYS_RETAIN=
+
 ## Job scheduler settings
 ##
 ## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
 ## and are always in terms of UTC time (regardless of your local time zone settings).
 ##
+## The schedule format is a bit different from crontab, as crontab does not contain seconds.
+## You can test the format at https://crontab.guru, but leave out the leading seconds field there!
+## SEC  MIN   HOUR   DAY OF MONTH    MONTH   DAY OF WEEK
+## "0   30   9,12,15     1,15       May-Aug  Mon,Wed,Fri"
+## "0   30     *          *            *          *     "
+## "0   30     1          *            *          *     "
+##
 ## How often (in ms) the job scheduler thread checks for jobs that need running.
 ## Set to 0 to globally disable scheduled jobs.
 # JOB_POLL_INTERVAL_MS=30000
@@ -108,6 +125,10 @@
 ## Cron schedule of the job that grants emergency access requests that have met the required wait time.
 ## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
 # EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 5 * * * *"
+##
+## Cron schedule of the job that cleans old events from the event table.
+## Defaults to daily (10 minutes after midnight). Set blank to disable this job. This job also does not run unless EVENTS_DAYS_RETAIN is set.
+# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"

 ## Enable extended logging, which shows timestamps and targets in the logs
 # EXTENDED_LOGGING=true
@@ -133,7 +154,7 @@
 ## Enable WAL for the DB
 ## Set to false to avoid enabling WAL during startup.
 ## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents vaultwarden from automatically enabling it on start.
+## this setting only prevents Vaultwarden from automatically enabling it on start.
 ## Please read project wiki page about this setting first before changing the value as it can
 ## cause performance degradation or might render the service unable to start.
 # ENABLE_DB_WAL=true
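Taken together, ORG_EVENTS_ENABLED, EVENTS_DAYS_RETAIN and EVENT_CLEANUP_SCHEDULE describe an opt-in purge job. As a rough illustration only (the names `event_cleanup_job` and `Event::clean_events` are assumptions, as is a `CONFIG.events_days_retain()` getter mirroring the env var, and not necessarily what this PR ships), the scheduled task boils down to:

```rust
// Illustrative sketch: delete events older than EVENTS_DAYS_RETAIN days.
async fn event_cleanup_job(pool: crate::db::DbPool) {
    // Without a retention value the job does nothing, matching the comments above.
    if let Some(days) = CONFIG.events_days_retain() {
        if let Ok(mut conn) = pool.get().await {
            let cutoff = chrono::Utc::now().naive_utc() - chrono::Duration::days(days);
            if let Err(e) = Event::clean_events(&cutoff, &mut conn).await {
                error!("Error cleaning old events: {:?}", e);
            }
        } else {
            error!("Failed to get a database connection while cleaning old events");
        }
    }
}
```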

+ 1 - 0
migrations/mysql/2022-10-18-170602_add_events/down.sql

@@ -0,0 +1 @@
+DROP TABLE event;

+ 19 - 0
migrations/mysql/2022-10-18-170602_add_events/up.sql

@@ -0,0 +1,19 @@
+CREATE TABLE event (
+  uuid               CHAR(36)    NOT NULL PRIMARY KEY,
+  event_type         INTEGER     NOT NULL,
+  user_uuid          CHAR(36),
+  org_uuid           CHAR(36),
+  cipher_uuid        CHAR(36),
+  collection_uuid    CHAR(36),
+  group_uuid         CHAR(36),
+  org_user_uuid      CHAR(36),
+  act_user_uuid      CHAR(36),
+  device_type        INTEGER,
+  ip_address         TEXT,
+  event_date         DATETIME    NOT NULL,
+  policy_uuid        CHAR(36),
+  provider_uuid      CHAR(36),
+  provider_user_uuid CHAR(36),
+  provider_org_uuid  CHAR(36),
+  UNIQUE (uuid)
+);

+ 1 - 0
migrations/postgresql/2022-10-18-170602_add_events/down.sql

@@ -0,0 +1 @@
+DROP TABLE event;

+ 19 - 0
migrations/postgresql/2022-10-18-170602_add_events/up.sql

@@ -0,0 +1,19 @@
+CREATE TABLE event (
+  uuid               CHAR(36)        NOT NULL PRIMARY KEY,
+  event_type         INTEGER     NOT NULL,
+  user_uuid          CHAR(36),
+  org_uuid           CHAR(36),
+  cipher_uuid        CHAR(36),
+  collection_uuid    CHAR(36),
+  group_uuid         CHAR(36),
+  org_user_uuid      CHAR(36),
+  act_user_uuid      CHAR(36),
+  device_type        INTEGER,
+  ip_address         TEXT,
+  event_date         TIMESTAMP    NOT NULL,
+  policy_uuid        CHAR(36),
+  provider_uuid      CHAR(36),
+  provider_user_uuid CHAR(36),
+  provider_org_uuid  CHAR(36),
+  UNIQUE (uuid)
+);

+ 1 - 0
migrations/sqlite/2022-10-18-170602_add_events/down.sql

@@ -0,0 +1 @@
+DROP TABLE event;

+ 19 - 0
migrations/sqlite/2022-10-18-170602_add_events/up.sql

@@ -0,0 +1,19 @@
+CREATE TABLE event (
+  uuid               TEXT        NOT NULL PRIMARY KEY,
+  event_type         INTEGER     NOT NULL,
+  user_uuid          TEXT,
+  org_uuid           TEXT,
+  cipher_uuid        TEXT,
+  collection_uuid    TEXT,
+  group_uuid         TEXT,
+  org_user_uuid      TEXT,
+  act_user_uuid      TEXT,
+  device_type        INTEGER,
+  ip_address         TEXT,
+  event_date         DATETIME    NOT NULL,
+  policy_uuid        TEXT,
+  provider_uuid      TEXT,
+  provider_user_uuid TEXT,
+  provider_org_uuid  TEXT,
+  UNIQUE (uuid)
+);
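All three migrations create the same `event` table, differing only in column types per backend. On the Rust side this presumably maps onto a Diesel model mirroring those columns; the struct below is a sketch derived from the schema above, not the actual model file added by this PR:

```rust
// Sketch of a model matching the event table above; derives/annotations are illustrative
// (Vaultwarden wraps its models in its own db_object! machinery, omitted here).
use chrono::NaiveDateTime;

#[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset)]
#[table_name = "event"]
#[primary_key(uuid)]
pub struct Event {
    pub uuid: String,
    pub event_type: i32, // Bitwarden EventType numeric code
    pub user_uuid: Option<String>,
    pub org_uuid: Option<String>,
    pub cipher_uuid: Option<String>,
    pub collection_uuid: Option<String>,
    pub group_uuid: Option<String>,
    pub org_user_uuid: Option<String>,
    pub act_user_uuid: Option<String>,
    pub device_type: Option<i32>,
    pub ip_address: Option<String>,
    pub event_date: NaiveDateTime,
    pub policy_uuid: Option<String>,
    pub provider_uuid: Option<String>,
    pub provider_user_uuid: Option<String>,
    pub provider_org_uuid: Option<String>,
}
```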

+ 40 - 4
src/api/admin.rs

@@ -13,7 +13,7 @@ use rocket::{
 };
 };
 
 
 use crate::{
 use crate::{
-    api::{ApiResult, EmptyResult, JsonResult, NumberOrString},
+    api::{core::log_event, ApiResult, EmptyResult, JsonResult, NumberOrString},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
     config::ConfigBuilder,
     config::ConfigBuilder,
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
     db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
@@ -81,6 +81,8 @@ const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";
 
 
 const BASE_TEMPLATE: &str = "admin/base";
 const BASE_TEMPLATE: &str = "admin/base";
 
 
+const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";
+
 fn admin_path() -> String {
 fn admin_path() -> String {
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
     format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
 }
 }
@@ -361,9 +363,27 @@ async fn get_user_json(uuid: String, _token: AdminToken, mut conn: DbConn) -> Js
 }
 }
 
 
 #[post("/users/<uuid>/delete")]
 #[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn delete_user(uuid: String, _token: AdminToken, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
     let user = get_user_or_404(&uuid, &mut conn).await?;
     let user = get_user_or_404(&uuid, &mut conn).await?;
-    user.delete(&mut conn).await
+
+    // Get the user_org records before deleting the actual user
+    let user_orgs = UserOrganization::find_any_state_by_user(&uuid, &mut conn).await;
+    let res = user.delete(&mut conn).await;
+
+    for user_org in user_orgs {
+        log_event(
+            EventType::OrganizationUserRemoved as i32,
+            &user_org.uuid,
+            user_org.org_uuid,
+            String::from(ACTING_ADMIN_USER),
+            14, // Use UnknownBrowser type
+            &ip.ip,
+            &mut conn,
+        )
+        .await;
+    }
+
+    res
 }
 }
 
 
 #[post("/users/<uuid>/deauth")]
 #[post("/users/<uuid>/deauth")]
@@ -409,7 +429,12 @@ struct UserOrgTypeData {
 }
 }
 
 
 #[post("/users/org_type", data = "<data>")]
 #[post("/users/org_type", data = "<data>")]
-async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
+async fn update_user_org_type(
+    data: Json<UserOrgTypeData>,
+    _token: AdminToken,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
     let data: UserOrgTypeData = data.into_inner();
     let data: UserOrgTypeData = data.into_inner();
 
 
     let mut user_to_edit =
     let mut user_to_edit =
@@ -444,6 +469,17 @@ async fn update_user_org_type(data: Json<UserOrgTypeData>, _token: AdminToken, m
         }
         }
     }
     }
 
 
+    log_event(
+        EventType::OrganizationUserUpdated as i32,
+        &user_to_edit.uuid,
+        data.org_uuid,
+        String::from(ACTING_ADMIN_USER),
+        14, // Use UnknownBrowser type
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     user_to_edit.atype = new_type;
     user_to_edit.atype = new_type;
     user_to_edit.save(&mut conn).await
     user_to_edit.save(&mut conn).await
 }
 }
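The admin handlers above cast `EventType` variants to `i32` and pass a hard-coded device type of 14 (UnknownBrowser), since admin-panel actions are not tied to a client device. The variant names used throughout this diff imply an enum roughly like the sketch below; the real enum assigns each variant the explicit numeric code used by upstream Bitwarden (user events in the 1000s, cipher events in the 1100s, organization-user events in the 1500s, organization events in the 1600s), which are omitted here rather than guessed:

```rust
// Variant names taken from the call sites in this diff; explicit discriminants
// (the upstream Bitwarden event codes) are deliberately left out of this sketch.
pub enum EventType {
    // 1000s: user events
    UserChangedPassword,
    // 1100s: cipher events
    CipherCreated,
    CipherUpdated,
    CipherDeleted,
    CipherAttachmentCreated,
    CipherAttachmentDeleted,
    CipherShared,
    CipherUpdatedCollections,
    CipherSoftDeleted,
    CipherRestored,
    // 1500s: organization-user events
    OrganizationUserUpdated,
    OrganizationUserRemoved,
    // 1600s: organization events
    OrganizationPurgedVault,
}
```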

+ 20 - 5
src/api/core/accounts.rs

@@ -3,8 +3,10 @@ use rocket::serde::json::Json;
 use serde_json::Value;
 use serde_json::Value;
 
 
 use crate::{
 use crate::{
-    api::{EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType},
-    auth::{decode_delete, decode_invite, decode_verify_email, Headers},
+    api::{
+        core::log_user_event, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
+    },
+    auth::{decode_delete, decode_invite, decode_verify_email, ClientIp, Headers},
     crypto,
     crypto,
     db::{models::*, DbConn},
     db::{models::*, DbConn},
     mail, CONFIG,
     mail, CONFIG,
@@ -268,7 +270,12 @@ struct ChangePassData {
 }
 }
 
 
 #[post("/accounts/password", data = "<data>")]
 #[post("/accounts/password", data = "<data>")]
-async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_password(
+    data: JsonUpcase<ChangePassData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
     let data: ChangePassData = data.into_inner().data;
     let data: ChangePassData = data.into_inner().data;
     let mut user = headers.user;
     let mut user = headers.user;
 
 
@@ -279,6 +286,8 @@ async fn post_password(data: JsonUpcase<ChangePassData>, headers: Headers, mut c
     user.password_hint = clean_password_hint(&data.MasterPasswordHint);
     user.password_hint = clean_password_hint(&data.MasterPasswordHint);
     enforce_password_hint_setting(&user.password_hint)?;
     enforce_password_hint_setting(&user.password_hint)?;
 
 
+    log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     user.set_password(
     user.set_password(
         &data.NewMasterPasswordHash,
         &data.NewMasterPasswordHash,
         Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
         Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
@@ -334,7 +343,13 @@ struct KeyData {
 }
 }
 
 
 #[post("/accounts/key", data = "<data>")]
 #[post("/accounts/key", data = "<data>")]
-async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn post_rotatekey(
+    data: JsonUpcase<KeyData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
     let data: KeyData = data.into_inner().data;
     let data: KeyData = data.into_inner().data;
 
 
     if !headers.user.check_valid_password(&data.MasterPasswordHash) {
     if !headers.user.check_valid_password(&data.MasterPasswordHash) {
@@ -373,7 +388,7 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 
 
         // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
         // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
         // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
         // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
-        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
+        update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
             .await?
             .await?
     }
     }
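accounts.rs uses a second helper, `log_user_event`, for events tied to a user rather than to one specific organization (the password change above). Judging from its single call site, the org and acting-user arguments are dropped; a hedged guess at its shape follows, where the per-organization fan-out in the body is an assumption:

```rust
// Sketch inferred from the log_user_event(...) call above; the actual helper may differ.
use std::net::IpAddr;

pub async fn log_user_event(
    event_type: i32,  // e.g. EventType::UserChangedPassword as i32
    user_uuid: &str,  // the user the event is about (also the acting user)
    device_type: i32, // headers.device.atype
    ip: &IpAddr,
    conn: &mut DbConn,
) {
    if !CONFIG.org_events_enabled() {
        return;
    }
    // Presumably: look up the organizations this user is a member of and store
    // one event row per organization, so the event shows up in their event logs.
}
```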
 
 

+ 257 - 77
src/api/core/ciphers.rs

@@ -10,8 +10,8 @@ use rocket::{
 use serde_json::Value;
 use serde_json::Value;
 
 
 use crate::{
 use crate::{
-    api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
-    auth::Headers,
+    api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+    auth::{ClientIp, Headers},
     crypto,
     crypto,
     db::{models::*, DbConn, DbPool},
     db::{models::*, DbConn, DbPool},
     CONFIG,
     CONFIG,
@@ -247,9 +247,10 @@ async fn post_ciphers_admin(
     data: JsonUpcase<ShareCipherData>,
     data: JsonUpcase<ShareCipherData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    post_ciphers_create(data, headers, conn, nt).await
+    post_ciphers_create(data, headers, conn, ip, nt).await
 }
 }
 
 
 /// Called when creating a new org-owned cipher, or cloning a cipher (whether
 /// Called when creating a new org-owned cipher, or cloning a cipher (whether
@@ -260,6 +261,7 @@ async fn post_ciphers_create(
     data: JsonUpcase<ShareCipherData>,
     data: JsonUpcase<ShareCipherData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let mut data: ShareCipherData = data.into_inner().data;
     let mut data: ShareCipherData = data.into_inner().data;
@@ -287,12 +289,18 @@ async fn post_ciphers_create(
     // or otherwise), we can just ignore this field entirely.
     // or otherwise), we can just ignore this field entirely.
     data.Cipher.LastKnownRevisionDate = None;
     data.Cipher.LastKnownRevisionDate = None;
 
 
-    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 /// Called when creating a new user-owned cipher.
 /// Called when creating a new user-owned cipher.
 #[post("/ciphers", data = "<data>")]
 #[post("/ciphers", data = "<data>")]
-async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_ciphers(
+    data: JsonUpcase<CipherData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> JsonResult {
     let mut data: CipherData = data.into_inner().data;
     let mut data: CipherData = data.into_inner().data;
 
 
     // The web/browser clients set this field to null as expected, but the
     // The web/browser clients set this field to null as expected, but the
@@ -302,7 +310,7 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn:
     data.LastKnownRevisionDate = None;
     data.LastKnownRevisionDate = None;
 
 
     let mut cipher = Cipher::new(data.Type, data.Name.clone());
     let mut cipher = Cipher::new(data.Type, data.Name.clone());
-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::CipherCreate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &ip, &nt, UpdateType::CipherCreate).await?;
 
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
 }
 }
@@ -329,12 +337,14 @@ async fn enforce_personal_ownership_policy(
     Ok(())
     Ok(())
 }
 }
 
 
+#[allow(clippy::too_many_arguments)]
 pub async fn update_cipher_from_data(
 pub async fn update_cipher_from_data(
     cipher: &mut Cipher,
     cipher: &mut Cipher,
     data: CipherData,
     data: CipherData,
     headers: &Headers,
     headers: &Headers,
     shared_to_collection: bool,
     shared_to_collection: bool,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
     nt: &Notify<'_>,
     nt: &Notify<'_>,
     ut: UpdateType,
     ut: UpdateType,
 ) -> EmptyResult {
 ) -> EmptyResult {
@@ -356,6 +366,9 @@ pub async fn update_cipher_from_data(
         err!("Organization mismatch. Please resync the client before updating the cipher")
         err!("Organization mismatch. Please resync the client before updating the cipher")
     }
     }
 
 
+    // Check if this cipher is being transferred from a personal to an organization vault
+    let transfer_cipher = cipher.organization_uuid.is_none() && data.OrganizationId.is_some();
+
     if let Some(org_id) = data.OrganizationId {
     if let Some(org_id) = data.OrganizationId {
         match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
         match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await {
             None => err!("You don't have permission to add item to organization"),
             None => err!("You don't have permission to add item to organization"),
@@ -460,6 +473,26 @@ pub async fn update_cipher_from_data(
     cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?;
     cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?;
 
 
     if ut != UpdateType::None {
     if ut != UpdateType::None {
+        // Only log events for organizational ciphers
+        if let Some(org_uuid) = &cipher.organization_uuid {
+            let event_type = match (&ut, transfer_cipher) {
+                (UpdateType::CipherCreate, true) => EventType::CipherCreated,
+                (UpdateType::CipherUpdate, true) => EventType::CipherShared,
+                (_, _) => EventType::CipherUpdated,
+            };
+
+            log_event(
+                event_type as i32,
+                &cipher.uuid,
+                String::from(org_uuid),
+                headers.user.uuid.clone(),
+                headers.device.atype,
+                &ip.ip,
+                conn,
+            )
+            .await;
+        }
+
         nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await).await;
         nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await).await;
     }
     }
 
 
@@ -488,6 +521,7 @@ async fn post_ciphers_import(
     data: JsonUpcase<ImportData>,
     data: JsonUpcase<ImportData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     enforce_personal_ownership_policy(None, &headers, &mut conn).await?;
     enforce_personal_ownership_policy(None, &headers, &mut conn).await?;
@@ -516,7 +550,8 @@ async fn post_ciphers_import(
         cipher_data.FolderId = folder_uuid;
         cipher_data.FolderId = folder_uuid;
 
 
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?;
+        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
+            .await?;
     }
     }
 
 
     let mut user = headers.user;
     let mut user = headers.user;
@@ -532,9 +567,10 @@ async fn put_cipher_admin(
     data: JsonUpcase<CipherData>,
     data: JsonUpcase<CipherData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    put_cipher(uuid, data, headers, conn, nt).await
+    put_cipher(uuid, data, headers, conn, ip, nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/admin", data = "<data>")]
 #[post("/ciphers/<uuid>/admin", data = "<data>")]
@@ -543,9 +579,10 @@ async fn post_cipher_admin(
     data: JsonUpcase<CipherData>,
     data: JsonUpcase<CipherData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    post_cipher(uuid, data, headers, conn, nt).await
+    post_cipher(uuid, data, headers, conn, ip, nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>", data = "<data>")]
 #[post("/ciphers/<uuid>", data = "<data>")]
@@ -554,9 +591,10 @@ async fn post_cipher(
     data: JsonUpcase<CipherData>,
     data: JsonUpcase<CipherData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    put_cipher(uuid, data, headers, conn, nt).await
+    put_cipher(uuid, data, headers, conn, ip, nt).await
 }
 }
 
 
 #[put("/ciphers/<uuid>", data = "<data>")]
 #[put("/ciphers/<uuid>", data = "<data>")]
@@ -565,6 +603,7 @@ async fn put_cipher(
     data: JsonUpcase<CipherData>,
     data: JsonUpcase<CipherData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: CipherData = data.into_inner().data;
     let data: CipherData = data.into_inner().data;
@@ -583,7 +622,7 @@ async fn put_cipher(
         err!("Cipher is not write accessible")
         err!("Cipher is not write accessible")
     }
     }
 
 
-    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::CipherUpdate).await?;
+    update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &ip, &nt, UpdateType::CipherUpdate).await?;
 
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
 }
 }
@@ -600,8 +639,9 @@ async fn put_collections_update(
     data: JsonUpcase<CollectionsAdminData>,
     data: JsonUpcase<CollectionsAdminData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    post_collections_admin(uuid, data, headers, conn).await
+    post_collections_admin(uuid, data, headers, conn, ip).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/collections", data = "<data>")]
 #[post("/ciphers/<uuid>/collections", data = "<data>")]
@@ -610,8 +650,9 @@ async fn post_collections_update(
     data: JsonUpcase<CollectionsAdminData>,
     data: JsonUpcase<CollectionsAdminData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    post_collections_admin(uuid, data, headers, conn).await
+    post_collections_admin(uuid, data, headers, conn, ip).await
 }
 }
 
 
 #[put("/ciphers/<uuid>/collections-admin", data = "<data>")]
 #[put("/ciphers/<uuid>/collections-admin", data = "<data>")]
@@ -620,8 +661,9 @@ async fn put_collections_admin(
     data: JsonUpcase<CollectionsAdminData>,
     data: JsonUpcase<CollectionsAdminData>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    post_collections_admin(uuid, data, headers, conn).await
+    post_collections_admin(uuid, data, headers, conn, ip).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/collections-admin", data = "<data>")]
 #[post("/ciphers/<uuid>/collections-admin", data = "<data>")]
@@ -630,6 +672,7 @@ async fn post_collections_admin(
     data: JsonUpcase<CollectionsAdminData>,
     data: JsonUpcase<CollectionsAdminData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: CollectionsAdminData = data.into_inner().data;
     let data: CollectionsAdminData = data.into_inner().data;
 
 
@@ -665,6 +708,17 @@ async fn post_collections_admin(
         }
         }
     }
     }
 
 
+    log_event(
+        EventType::CipherUpdatedCollections as i32,
+        &cipher.uuid,
+        cipher.organization_uuid.unwrap(),
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     Ok(())
     Ok(())
 }
 }
 
 
@@ -681,11 +735,12 @@ async fn post_cipher_share(
     data: JsonUpcase<ShareCipherData>,
     data: JsonUpcase<ShareCipherData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner().data;
     let data: ShareCipherData = data.into_inner().data;
 
 
-    share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[put("/ciphers/<uuid>/share", data = "<data>")]
 #[put("/ciphers/<uuid>/share", data = "<data>")]
@@ -694,11 +749,12 @@ async fn put_cipher_share(
     data: JsonUpcase<ShareCipherData>,
     data: JsonUpcase<ShareCipherData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner().data;
     let data: ShareCipherData = data.into_inner().data;
 
 
-    share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&uuid, data, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[derive(Deserialize)]
 #[derive(Deserialize)]
@@ -713,6 +769,7 @@ async fn put_cipher_share_selected(
     data: JsonUpcase<ShareSelectedCipherData>,
     data: JsonUpcase<ShareSelectedCipherData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let mut data: ShareSelectedCipherData = data.into_inner().data;
     let mut data: ShareSelectedCipherData = data.into_inner().data;
@@ -740,7 +797,7 @@ async fn put_cipher_share_selected(
         };
         };
 
 
         match shared_cipher_data.Cipher.Id.take() {
         match shared_cipher_data.Cipher.Id.take() {
-            Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?,
+            Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &ip, &nt).await?,
             None => err!("Request missing ids field"),
             None => err!("Request missing ids field"),
         };
         };
     }
     }
@@ -753,6 +810,7 @@ async fn share_cipher_by_uuid(
     data: ShareCipherData,
     data: ShareCipherData,
     headers: &Headers,
     headers: &Headers,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
     nt: &Notify<'_>,
     nt: &Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
@@ -768,37 +826,30 @@ async fn share_cipher_by_uuid(
 
 
     let mut shared_to_collection = false;
     let mut shared_to_collection = false;
 
 
-    match data.Cipher.OrganizationId.clone() {
-        // If we don't get an organization ID, we don't do anything
-        // No error because this is used when using the Clone functionality
-        None => {}
-        Some(organization_uuid) => {
-            for uuid in &data.CollectionIds {
-                match Collection::find_by_uuid_and_org(uuid, &organization_uuid, conn).await {
-                    None => err!("Invalid collection ID provided"),
-                    Some(collection) => {
-                        if collection.is_writable_by_user(&headers.user.uuid, conn).await {
-                            CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
-                            shared_to_collection = true;
-                        } else {
-                            err!("No rights to modify the collection")
-                        }
+    if let Some(organization_uuid) = &data.Cipher.OrganizationId {
+        for uuid in &data.CollectionIds {
+            match Collection::find_by_uuid_and_org(uuid, organization_uuid, conn).await {
+                None => err!("Invalid collection ID provided"),
+                Some(collection) => {
+                    if collection.is_writable_by_user(&headers.user.uuid, conn).await {
+                        CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?;
+                        shared_to_collection = true;
+                    } else {
+                        err!("No rights to modify the collection")
                     }
                     }
                 }
                 }
             }
             }
         }
         }
     };
     };
 
 
-    update_cipher_from_data(
-        &mut cipher,
-        data.Cipher,
-        headers,
-        shared_to_collection,
-        conn,
-        nt,
-        UpdateType::CipherUpdate,
-    )
-    .await?;
+    // When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate.
+    let ut = if data.Cipher.LastKnownRevisionDate.is_some() {
+        UpdateType::CipherUpdate
+    } else {
+        UpdateType::CipherCreate
+    };
+
+    update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, ip, nt, ut).await?;
 
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
 }
 }
@@ -893,6 +944,7 @@ async fn save_attachment(
     data: Form<UploadData<'_>>,
     data: Form<UploadData<'_>>,
     headers: &Headers,
     headers: &Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
     let cipher = match Cipher::find_by_uuid(&cipher_uuid, &mut conn).await {
     let cipher = match Cipher::find_by_uuid(&cipher_uuid, &mut conn).await {
@@ -1011,6 +1063,19 @@ async fn save_attachment(
 
 
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&mut conn).await).await;
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(&mut conn).await).await;
 
 
+    if let Some(org_uuid) = &cipher.organization_uuid {
+        log_event(
+            EventType::CipherAttachmentCreated as i32,
+            &cipher.uuid,
+            String::from(org_uuid),
+            headers.user.uuid.clone(),
+            headers.device.atype,
+            &ip.ip,
+            &mut conn,
+        )
+        .await;
+    }
+
     Ok((cipher, conn))
     Ok((cipher, conn))
 }
 }
 
 
@@ -1025,6 +1090,7 @@ async fn post_attachment_v2_data(
     data: Form<UploadData<'_>>,
     data: Form<UploadData<'_>>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await {
     let attachment = match Attachment::find_by_id(&attachment_id, &mut conn).await {
@@ -1033,7 +1099,7 @@ async fn post_attachment_v2_data(
         None => err!("Attachment doesn't exist"),
         None => err!("Attachment doesn't exist"),
     };
     };
 
 
-    save_attachment(attachment, uuid, data, &headers, conn, nt).await?;
+    save_attachment(attachment, uuid, data, &headers, conn, ip, nt).await?;
 
 
     Ok(())
     Ok(())
 }
 }
@@ -1045,13 +1111,14 @@ async fn post_attachment(
     data: Form<UploadData<'_>>,
     data: Form<UploadData<'_>>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     // Setting this as None signifies to save_attachment() that it should create
     // Setting this as None signifies to save_attachment() that it should create
     // the attachment database record as well as saving the data to disk.
     // the attachment database record as well as saving the data to disk.
     let attachment = None;
     let attachment = None;
 
 
-    let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?;
+    let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, ip, nt).await?;
 
 
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, &mut conn).await))
 }
 }
@@ -1062,9 +1129,10 @@ async fn post_attachment_admin(
     data: Form<UploadData<'_>>,
     data: Form<UploadData<'_>>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    post_attachment(uuid, data, headers, conn, nt).await
+    post_attachment(uuid, data, headers, conn, ip, nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")]
@@ -1074,10 +1142,11 @@ async fn post_attachment_share(
     data: Form<UploadData<'_>>,
     data: Form<UploadData<'_>>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await?;
-    post_attachment(uuid, data, headers, conn, nt).await
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await?;
+    post_attachment(uuid, data, headers, conn, ip, nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
@@ -1086,9 +1155,10 @@ async fn delete_attachment_post_admin(
     attachment_id: String,
     attachment_id: String,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_attachment(uuid, attachment_id, headers, conn, nt).await
+    delete_attachment(uuid, attachment_id, headers, conn, ip, nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
 #[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
@@ -1097,9 +1167,10 @@ async fn delete_attachment_post(
     attachment_id: String,
     attachment_id: String,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_attachment(uuid, attachment_id, headers, conn, nt).await
+    delete_attachment(uuid, attachment_id, headers, conn, ip, nt).await
 }
 }
 
 
 #[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
 #[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
@@ -1108,9 +1179,10 @@ async fn delete_attachment(
     attachment_id: String,
     attachment_id: String,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
 #[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
@@ -1119,39 +1191,70 @@ async fn delete_attachment_admin(
     attachment_id: String,
     attachment_id: String,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &nt).await
+    _delete_cipher_attachment_by_id(&uuid, &attachment_id, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[post("/ciphers/<uuid>/delete")]
 #[post("/ciphers/<uuid>/delete")]
-async fn delete_cipher_post(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await
+async fn delete_cipher_post(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete
 }
 }
 
 
 #[post("/ciphers/<uuid>/delete-admin")]
 #[post("/ciphers/<uuid>/delete-admin")]
-async fn delete_cipher_post_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await
+async fn delete_cipher_post_admin(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete
 }
 }
 
 
 #[put("/ciphers/<uuid>/delete")]
 #[put("/ciphers/<uuid>/delete")]
-async fn delete_cipher_put(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &nt).await
+async fn delete_cipher_put(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &ip, &nt).await // soft delete
 }
 }
 
 
 #[put("/ciphers/<uuid>/delete-admin")]
 #[put("/ciphers/<uuid>/delete-admin")]
-async fn delete_cipher_put_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &nt).await
+async fn delete_cipher_put_admin(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, true, &ip, &nt).await
 }
 }
 
 
 #[delete("/ciphers/<uuid>")]
 #[delete("/ciphers/<uuid>")]
-async fn delete_cipher(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await
+async fn delete_cipher(uuid: String, headers: Headers, mut conn: DbConn, ip: ClientIp, nt: Notify<'_>) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete
 }
 }
 
 
 #[delete("/ciphers/<uuid>/admin")]
 #[delete("/ciphers/<uuid>/admin")]
-async fn delete_cipher_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
-    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &nt).await
+async fn delete_cipher_admin(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> EmptyResult {
+    _delete_cipher_by_uuid(&uuid, &headers, &mut conn, false, &ip, &nt).await // permanent delete
 }
 }
 
 
 #[delete("/ciphers", data = "<data>")]
 #[delete("/ciphers", data = "<data>")]
@@ -1159,9 +1262,10 @@ async fn delete_cipher_selected(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _delete_multiple_ciphers(data, headers, conn, false, nt).await
+    _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete
 }
 }
 
 
 #[post("/ciphers/delete", data = "<data>")]
 #[post("/ciphers/delete", data = "<data>")]
@@ -1169,9 +1273,10 @@ async fn delete_cipher_selected_post(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _delete_multiple_ciphers(data, headers, conn, false, nt).await
+    _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete
 }
 }
 
 
 #[put("/ciphers/delete", data = "<data>")]
 #[put("/ciphers/delete", data = "<data>")]
@@ -1179,9 +1284,10 @@ async fn delete_cipher_selected_put(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _delete_multiple_ciphers(data, headers, conn, true, nt).await // soft delete
+    _delete_multiple_ciphers(data, headers, conn, true, ip, nt).await // soft delete
 }
 }
 
 
 #[delete("/ciphers/admin", data = "<data>")]
 #[delete("/ciphers/admin", data = "<data>")]
@@ -1189,9 +1295,10 @@ async fn delete_cipher_selected_admin(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_cipher_selected(data, headers, conn, nt).await
+    _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete
 }
 }
 
 
 #[post("/ciphers/delete-admin", data = "<data>")]
 #[post("/ciphers/delete-admin", data = "<data>")]
@@ -1199,9 +1306,10 @@ async fn delete_cipher_selected_post_admin(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_cipher_selected_post(data, headers, conn, nt).await
+    _delete_multiple_ciphers(data, headers, conn, false, ip, nt).await // permanent delete
 }
 }
 
 
 #[put("/ciphers/delete-admin", data = "<data>")]
 #[put("/ciphers/delete-admin", data = "<data>")]
@@ -1209,19 +1317,32 @@ async fn delete_cipher_selected_put_admin(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_cipher_selected_put(data, headers, conn, nt).await
+    _delete_multiple_ciphers(data, headers, conn, true, ip, nt).await // soft delete
 }
 }
 
 
 #[put("/ciphers/<uuid>/restore")]
 #[put("/ciphers/<uuid>/restore")]
-async fn restore_cipher_put(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &nt).await
+async fn restore_cipher_put(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[put("/ciphers/<uuid>/restore-admin")]
 #[put("/ciphers/<uuid>/restore-admin")]
-async fn restore_cipher_put_admin(uuid: String, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &nt).await
+async fn restore_cipher_put_admin(
+    uuid: String,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+    nt: Notify<'_>,
+) -> JsonResult {
+    _restore_cipher_by_uuid(&uuid, &headers, &mut conn, &ip, &nt).await
 }
 }
 
 
 #[put("/ciphers/restore", data = "<data>")]
 #[put("/ciphers/restore", data = "<data>")]
@@ -1229,9 +1350,10 @@ async fn restore_cipher_selected(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
-    _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
+    _restore_multiple_ciphers(data, &headers, &mut conn, ip, &nt).await
 }
 }
 
 
 #[derive(Deserialize)]
 #[derive(Deserialize)]
@@ -1303,6 +1425,7 @@ async fn delete_all(
     data: JsonUpcase<PasswordData>,
     data: JsonUpcase<PasswordData>,
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: PasswordData = data.into_inner().data;
     let data: PasswordData = data.into_inner().data;
@@ -1323,6 +1446,18 @@ async fn delete_all(
                     if user_org.atype == UserOrgType::Owner {
                     if user_org.atype == UserOrgType::Owner {
                         Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?;
                         Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?;
                         nt.send_user_update(UpdateType::Vault, &user).await;
                         nt.send_user_update(UpdateType::Vault, &user).await;
+
+                        log_event(
+                            EventType::OrganizationPurgedVault as i32,
+                            &org_data.org_id,
+                            org_data.org_id.clone(),
+                            user.uuid,
+                            headers.device.atype,
+                            &ip.ip,
+                            &mut conn,
+                        )
+                        .await;
+
                         Ok(())
                         Ok(())
                     } else {
                     } else {
                         err!("You don't have permission to purge the organization vault");
                         err!("You don't have permission to purge the organization vault");
@@ -1354,6 +1489,7 @@ async fn _delete_cipher_by_uuid(
     headers: &Headers,
     headers: &Headers,
     conn: &mut DbConn,
     conn: &mut DbConn,
     soft_delete: bool,
     soft_delete: bool,
+    ip: &ClientIp,
     nt: &Notify<'_>,
     nt: &Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
@@ -1374,6 +1510,16 @@ async fn _delete_cipher_by_uuid(
         nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn).await).await;
         nt.send_cipher_update(UpdateType::CipherDelete, &cipher, &cipher.update_users_revision(conn).await).await;
     }
     }
 
 
+    if let Some(org_uuid) = cipher.organization_uuid {
+        let event_type = match soft_delete {
+            true => EventType::CipherSoftDeleted as i32,
+            false => EventType::CipherDeleted as i32,
+        };
+
+        log_event(event_type, &cipher.uuid, org_uuid, headers.user.uuid.clone(), headers.device.atype, &ip.ip, conn)
+            .await;
+    }
+
     Ok(())
     Ok(())
 }
 }
 
 
@@ -1382,6 +1528,7 @@ async fn _delete_multiple_ciphers(
     headers: Headers,
     headers: Headers,
     mut conn: DbConn,
     mut conn: DbConn,
     soft_delete: bool,
     soft_delete: bool,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: Value = data.into_inner().data;
     let data: Value = data.into_inner().data;
@@ -1395,7 +1542,7 @@ async fn _delete_multiple_ciphers(
     };
     };
 
 
     for uuid in uuids {
     for uuid in uuids {
-        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &nt).await {
+        if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &ip, &nt).await {
             return error;
             return error;
         };
         };
     }
     }
@@ -1403,7 +1550,13 @@ async fn _delete_multiple_ciphers(
     Ok(())
     Ok(())
 }
 }
 
 
-async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>) -> JsonResult {
+async fn _restore_cipher_by_uuid(
+    uuid: &str,
+    headers: &Headers,
+    conn: &mut DbConn,
+    ip: &ClientIp,
+    nt: &Notify<'_>,
+) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
     let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
         Some(cipher) => cipher,
         Some(cipher) => cipher,
         None => err!("Cipher doesn't exist"),
         None => err!("Cipher doesn't exist"),
@@ -1417,6 +1570,19 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon
     cipher.save(conn).await?;
     cipher.save(conn).await?;
 
 
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await;
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await;
+    if let Some(org_uuid) = &cipher.organization_uuid {
+        log_event(
+            EventType::CipherRestored as i32,
+            &cipher.uuid.clone(),
+            String::from(org_uuid),
+            headers.user.uuid.clone(),
+            headers.device.atype,
+            &ip.ip,
+            conn,
+        )
+        .await;
+    }
+
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
     Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, conn).await))
 }
 }
 
 
@@ -1424,6 +1590,7 @@ async fn _restore_multiple_ciphers(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: &Headers,
     headers: &Headers,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: ClientIp,
     nt: &Notify<'_>,
     nt: &Notify<'_>,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: Value = data.into_inner().data;
     let data: Value = data.into_inner().data;
@@ -1438,7 +1605,7 @@ async fn _restore_multiple_ciphers(
 
 
     let mut ciphers: Vec<Value> = Vec::new();
     let mut ciphers: Vec<Value> = Vec::new();
     for uuid in uuids {
     for uuid in uuids {
-        match _restore_cipher_by_uuid(uuid, headers, conn, nt).await {
+        match _restore_cipher_by_uuid(uuid, headers, conn, &ip, nt).await {
             Ok(json) => ciphers.push(json.into_inner()),
             Ok(json) => ciphers.push(json.into_inner()),
             err => return err,
             err => return err,
         }
         }
@@ -1456,6 +1623,7 @@ async fn _delete_cipher_attachment_by_id(
     attachment_id: &str,
     attachment_id: &str,
     headers: &Headers,
     headers: &Headers,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
     nt: &Notify<'_>,
     nt: &Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let attachment = match Attachment::find_by_id(attachment_id, conn).await {
     let attachment = match Attachment::find_by_id(attachment_id, conn).await {
@@ -1479,6 +1647,18 @@ async fn _delete_cipher_attachment_by_id(
     // Delete attachment
     // Delete attachment
     attachment.delete(conn).await?;
     attachment.delete(conn).await?;
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await;
     nt.send_cipher_update(UpdateType::CipherUpdate, &cipher, &cipher.update_users_revision(conn).await).await;
+    if let Some(org_uuid) = cipher.organization_uuid {
+        log_event(
+            EventType::CipherAttachmentDeleted as i32,
+            &cipher.uuid,
+            org_uuid,
+            headers.user.uuid.clone(),
+            headers.device.atype,
+            &ip.ip,
+            conn,
+        )
+        .await;
+    }
     Ok(())
     Ok(())
 }
 }
 
 

+ 341 - 0
src/api/core/events.rs

@@ -0,0 +1,341 @@
+use std::net::IpAddr;
+
+use chrono::NaiveDateTime;
+use rocket::{form::FromForm, serde::json::Json, Route};
+use serde_json::Value;
+
+use crate::{
+    api::{EmptyResult, JsonResult, JsonUpcaseVec},
+    auth::{AdminHeaders, ClientIp, Headers},
+    db::{
+        models::{Cipher, Event, UserOrganization},
+        DbConn, DbPool,
+    },
+    util::parse_date,
+    CONFIG,
+};
+
+/// ###############################################################################################################
+/// /api routes
+pub fn routes() -> Vec<Route> {
+    routes![get_org_events, get_cipher_events, get_user_events,]
+}
+
+#[derive(FromForm)]
+#[allow(non_snake_case)]
+struct EventRange {
+    start: String,
+    end: String,
+    #[field(name = "continuationToken")]
+    continuation_token: Option<String>,
+}
+
+// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
+#[get("/organizations/<org_id>/events?<data..>")]
+async fn get_org_events(org_id: String, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
+    // Return an empty vec when org events are disabled.
+    // This prevents client errors
+    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
+        Vec::with_capacity(0)
+    } else {
+        let start_date = parse_date(&data.start);
+        let end_date = if let Some(before_date) = &data.continuation_token {
+            parse_date(before_date)
+        } else {
+            parse_date(&data.end)
+        };
+
+        Event::find_by_organization_uuid(&org_id, &start_date, &end_date, &mut conn)
+            .await
+            .iter()
+            .map(|e| e.to_json())
+            .collect()
+    };
+
+    Ok(Json(json!({
+        "Data": events_json,
+        "Object": "list",
+        "ContinuationToken": get_continuation_token(&events_json),
+    })))
+}
+
+#[get("/ciphers/<cipher_id>/events?<data..>")]
+async fn get_cipher_events(cipher_id: String, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
+    // Return an empty vec when org events are disabled.
+    // This prevents client errors
+    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
+        Vec::with_capacity(0)
+    } else {
+        let mut events_json = Vec::with_capacity(0);
+        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, &cipher_id, &mut conn).await {
+            let start_date = parse_date(&data.start);
+            let end_date = if let Some(before_date) = &data.continuation_token {
+                parse_date(before_date)
+            } else {
+                parse_date(&data.end)
+            };
+
+            events_json = Event::find_by_cipher_uuid(&cipher_id, &start_date, &end_date, &mut conn)
+                .await
+                .iter()
+                .map(|e| e.to_json())
+                .collect()
+        }
+        events_json
+    };
+
+    Ok(Json(json!({
+        "Data": events_json,
+        "Object": "list",
+        "ContinuationToken": get_continuation_token(&events_json),
+    })))
+}
+
+#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
+async fn get_user_events(
+    org_id: String,
+    user_org_id: String,
+    data: EventRange,
+    _headers: AdminHeaders,
+    mut conn: DbConn,
+) -> JsonResult {
+    // Return an empty vec when org events are disabled.
+    // This prevents client errors
+    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
+        Vec::with_capacity(0)
+    } else {
+        let start_date = parse_date(&data.start);
+        let end_date = if let Some(before_date) = &data.continuation_token {
+            parse_date(before_date)
+        } else {
+            parse_date(&data.end)
+        };
+
+        Event::find_by_org_and_user_org(&org_id, &user_org_id, &start_date, &end_date, &mut conn)
+            .await
+            .iter()
+            .map(|e| e.to_json())
+            .collect()
+    };
+
+    Ok(Json(json!({
+        "Data": events_json,
+        "Object": "list",
+        "ContinuationToken": get_continuation_token(&events_json),
+    })))
+}
+
+fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
+    // When the length of the vec equals the maximum page size (Event::PAGE_SIZE), there is probably more data.
+    // When it is shorter, all events have been loaded.
+    if events_json.len() as i64 == Event::PAGE_SIZE {
+        if let Some(last_event) = events_json.last() {
+            last_event["date"].as_str()
+        } else {
+            None
+        }
+    } else {
+        None
+    }
+}
+
+/// ###############################################################################################################
+/// /events routes
+pub fn main_routes() -> Vec<Route> {
+    routes![post_events_collect,]
+}
+
+#[derive(Deserialize, Debug)]
+#[allow(non_snake_case)]
+struct EventCollection {
+    // Mandatory
+    Type: i32,
+    Date: String,
+
+    // Optional
+    CipherId: Option<String>,
+    OrganizationId: Option<String>,
+}
+
+// Upstream:
+// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
+// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+#[post("/collect", format = "application/json", data = "<data>")]
+async fn post_events_collect(
+    data: JsonUpcaseVec<EventCollection>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
+    if !CONFIG.org_events_enabled() {
+        return Ok(());
+    }
+
+    for event in data.iter().map(|d| &d.data) {
+        let event_date = parse_date(&event.Date);
+        match event.Type {
+            1000..=1099 => {
+                _log_user_event(
+                    event.Type,
+                    &headers.user.uuid,
+                    headers.device.atype,
+                    Some(event_date),
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
+            }
+            1600..=1699 => {
+                if let Some(org_uuid) = &event.OrganizationId {
+                    _log_event(
+                        event.Type,
+                        org_uuid,
+                        String::from(org_uuid),
+                        &headers.user.uuid,
+                        headers.device.atype,
+                        Some(event_date),
+                        &ip.ip,
+                        &mut conn,
+                    )
+                    .await;
+                }
+            }
+            _ => {
+                if let Some(cipher_uuid) = &event.CipherId {
+                    if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
+                        if let Some(org_uuid) = cipher.organization_uuid {
+                            _log_event(
+                                event.Type,
+                                cipher_uuid,
+                                org_uuid,
+                                &headers.user.uuid,
+                                headers.device.atype,
+                                Some(event_date),
+                                &ip.ip,
+                                &mut conn,
+                            )
+                            .await;
+                        }
+                    }
+                }
+            }
+        }
+    }
+    Ok(())
+}
+
+pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
+    if !CONFIG.org_events_enabled() {
+        return;
+    }
+    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
+}
+
+async fn _log_user_event(
+    event_type: i32,
+    user_uuid: &str,
+    device_type: i32,
+    event_date: Option<NaiveDateTime>,
+    ip: &IpAddr,
+    conn: &mut DbConn,
+) {
+    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
+    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org
+
+    // Upstream also saves the event without any org_uuid.
+    let mut event = Event::new(event_type, event_date);
+    event.user_uuid = Some(String::from(user_uuid));
+    event.act_user_uuid = Some(String::from(user_uuid));
+    event.device_type = Some(device_type);
+    event.ip_address = Some(ip.to_string());
+    events.push(event);
+
+    // For each org the user is a member of, also store the event per org.
+    for org_uuid in orgs {
+        let mut event = Event::new(event_type, event_date);
+        event.user_uuid = Some(String::from(user_uuid));
+        event.org_uuid = Some(org_uuid);
+        event.act_user_uuid = Some(String::from(user_uuid));
+        event.device_type = Some(device_type);
+        event.ip_address = Some(ip.to_string());
+        events.push(event);
+    }
+
+    Event::save_user_event(events, conn).await.unwrap_or(());
+}
+
+pub async fn log_event(
+    event_type: i32,
+    source_uuid: &str,
+    org_uuid: String,
+    act_user_uuid: String,
+    device_type: i32,
+    ip: &IpAddr,
+    conn: &mut DbConn,
+) {
+    if !CONFIG.org_events_enabled() {
+        return;
+    }
+    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
+}
+
+#[allow(clippy::too_many_arguments)]
+async fn _log_event(
+    event_type: i32,
+    source_uuid: &str,
+    org_uuid: String,
+    act_user_uuid: &str,
+    device_type: i32,
+    event_date: Option<NaiveDateTime>,
+    ip: &IpAddr,
+    conn: &mut DbConn,
+) {
+    // Create a new empty event
+    let mut event = Event::new(event_type, event_date);
+    match event_type {
+        // 1000..=1099 are user events; they need to be logged via log_user_event()
+        // Cipher Events
+        1100..=1199 => {
+            event.cipher_uuid = Some(String::from(source_uuid));
+        }
+        // Collection Events
+        1300..=1399 => {
+            event.collection_uuid = Some(String::from(source_uuid));
+        }
+        // Group Events
+        1400..=1499 => {
+            event.group_uuid = Some(String::from(source_uuid));
+        }
+        // Org User Events
+        1500..=1599 => {
+            event.org_user_uuid = Some(String::from(source_uuid));
+        }
+        // 1600..=1699 are organizational events, and they do not need the source_uuid
+        // Policy Events
+        1700..=1799 => {
+            event.policy_uuid = Some(String::from(source_uuid));
+        }
+        // Ignore others
+        _ => {}
+    }
+
+    event.org_uuid = Some(org_uuid);
+    event.act_user_uuid = Some(String::from(act_user_uuid));
+    event.device_type = Some(device_type);
+    event.ip_address = Some(ip.to_string());
+    event.save(conn).await.unwrap_or(());
+}
+
+pub async fn event_cleanup_job(pool: DbPool) {
+    debug!("Start events cleanup job");
+    if CONFIG.events_days_retain().is_none() {
+        debug!("events_days_retain is not configured, abort");
+        return;
+    }
+
+    if let Ok(mut conn) = pool.get().await {
+        Event::clean_events(&mut conn).await.ok();
+    } else {
+        error!("Failed to get DB connection while trying to cleanup the events table")
+    }
+}

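Since the three GET endpoints above all report a ContinuationToken derived from the date of the last returned event, a consumer pages further back through history (assuming the query returns newest events first) by feeding that value into the continuationToken query parameter, which the handlers then use as the new end date. A minimal client-side sketch of that loop follows; `fetch_page` is a hypothetical transport callback (e.g. backed by any HTTP client) returning one parsed JSON response, and the URL shape follows the routes defined above.

use serde_json::Value;

// Pages through /api/organizations/<org_id>/events until a page comes back
// without a ContinuationToken (i.e. fewer than Event::PAGE_SIZE results).
fn page_through_events(
    org_id: &str,
    start: &str,
    end: &str,
    fetch_page: impl Fn(&str) -> Value, // hypothetical HTTP helper
) -> Vec<Value> {
    let mut all_events = Vec::new();
    let mut token: Option<String> = None;
    loop {
        // When present, the server treats continuationToken as the new end date.
        let url = match &token {
            Some(t) => format!(
                "/api/organizations/{org_id}/events?start={start}&end={end}&continuationToken={t}"
            ),
            None => format!("/api/organizations/{org_id}/events?start={start}&end={end}"),
        };
        let body = fetch_page(&url);
        if let Some(items) = body["Data"].as_array() {
            all_events.extend(items.iter().cloned());
        }
        match body["ContinuationToken"].as_str() {
            Some(t) => token = Some(t.to_string()),
            None => break,
        }
    }
    all_events
}

Depending on how the date bounds are applied in the database query, the boundary event may appear on two consecutive pages, so a real client might de-duplicate on the event date.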
+ 10 - 0
src/api/core/mod.rs

@@ -1,6 +1,7 @@
 pub mod accounts;
 pub mod accounts;
 mod ciphers;
 mod ciphers;
 mod emergency_access;
 mod emergency_access;
+mod events;
 mod folders;
 mod folders;
 mod organizations;
 mod organizations;
 mod sends;
 mod sends;
@@ -9,6 +10,7 @@ pub mod two_factor;
 pub use ciphers::purge_trashed_ciphers;
 pub use ciphers::purge_trashed_ciphers;
 pub use ciphers::{CipherSyncData, CipherSyncType};
 pub use ciphers::{CipherSyncData, CipherSyncType};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
 pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
+pub use events::{event_cleanup_job, log_event, log_user_event};
 pub use sends::purge_sends;
 pub use sends::purge_sends;
 pub use two_factor::send_incomplete_2fa_notifications;
 pub use two_factor::send_incomplete_2fa_notifications;
 
 
@@ -22,6 +24,7 @@ pub fn routes() -> Vec<Route> {
     routes.append(&mut accounts::routes());
     routes.append(&mut accounts::routes());
     routes.append(&mut ciphers::routes());
     routes.append(&mut ciphers::routes());
     routes.append(&mut emergency_access::routes());
     routes.append(&mut emergency_access::routes());
+    routes.append(&mut events::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut folders::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut organizations::routes());
     routes.append(&mut two_factor::routes());
     routes.append(&mut two_factor::routes());
@@ -34,6 +37,13 @@ pub fn routes() -> Vec<Route> {
     routes
     routes
 }
 }
 
 
+pub fn events_routes() -> Vec<Route> {
+    let mut routes = Vec::new();
+    routes.append(&mut events::main_routes());
+
+    routes
+}
+
 //
 //
 // Move this somewhere else
 // Move this somewhere else
 //
 //

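events_routes() only exposes post_events_collect, which the clients call with a batch of events. Purely as an illustration of the accepted shape, mirroring the EventCollection struct in events.rs, a body looks roughly like the following; the UUIDs, dates, and the exact date format expected by util::parse_date are placeholders.

fn main() {
    // Illustrative payload for POST /events/collect; all values are placeholders.
    let body = serde_json::json!([
        // 1000..=1099: user events, no extra ids required
        { "Type": 1000, "Date": "2022-11-01T12:34:56.000000Z" },
        // 1600..=1699: organization events, need an OrganizationId
        { "Type": 1600, "Date": "2022-11-01T12:35:10.000000Z",
          "OrganizationId": "00000000-0000-0000-0000-000000000000" },
        // other types are resolved through the cipher's organization
        { "Type": 1100, "Date": "2022-11-01T12:36:00.000000Z",
          "CipherId": "11111111-1111-1111-1111-111111111111" }
    ]);
    println!("{body}");
}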
+ 397 - 70
src/api/core/organizations.rs

@@ -5,11 +5,11 @@ use serde_json::Value;
 
 
 use crate::{
 use crate::{
     api::{
     api::{
-        core::{CipherSyncData, CipherSyncType},
+        core::{log_event, CipherSyncData, CipherSyncType},
         ApiResult, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData,
         ApiResult, EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData,
         UpdateType,
         UpdateType,
     },
     },
-    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
+    auth::{decode_invite, AdminHeaders, ClientIp, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
     db::{models::*, DbConn},
     error::Error,
     error::Error,
     mail,
     mail,
@@ -203,7 +203,7 @@ async fn post_delete_organization(
 }
 }
 
 
 #[post("/organizations/<org_id>/leave")]
 #[post("/organizations/<org_id>/leave")]
-async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn, ip: ClientIp) -> EmptyResult {
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await {
     match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, &mut conn).await {
         None => err!("User not part of organization"),
         None => err!("User not part of organization"),
         Some(user_org) => {
         Some(user_org) => {
@@ -213,6 +213,17 @@ async fn leave_organization(org_id: String, headers: Headers, mut conn: DbConn)
                 err!("The last owner can't leave")
                 err!("The last owner can't leave")
             }
             }
 
 
+            log_event(
+                EventType::OrganizationUserRemoved as i32,
+                &user_org.uuid,
+                org_id,
+                headers.user.uuid.clone(),
+                headers.device.atype,
+                &ip.ip,
+                &mut conn,
+            )
+            .await;
+
             user_org.delete(&mut conn).await
             user_org.delete(&mut conn).await
         }
         }
     }
     }
@@ -232,16 +243,18 @@ async fn put_organization(
     headers: OwnerHeaders,
     headers: OwnerHeaders,
     data: JsonUpcase<OrganizationUpdateData>,
     data: JsonUpcase<OrganizationUpdateData>,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
-    post_organization(org_id, headers, data, conn).await
+    post_organization(org_id, headers, data, conn, ip).await
 }
 }
 
 
 #[post("/organizations/<org_id>", data = "<data>")]
 #[post("/organizations/<org_id>", data = "<data>")]
 async fn post_organization(
 async fn post_organization(
     org_id: String,
     org_id: String,
-    _headers: OwnerHeaders,
+    headers: OwnerHeaders,
     data: JsonUpcase<OrganizationUpdateData>,
     data: JsonUpcase<OrganizationUpdateData>,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: OrganizationUpdateData = data.into_inner().data;
     let data: OrganizationUpdateData = data.into_inner().data;
 
 
@@ -254,6 +267,18 @@ async fn post_organization(
     org.billing_email = data.BillingEmail;
     org.billing_email = data.BillingEmail;
 
 
     org.save(&mut conn).await?;
     org.save(&mut conn).await?;
+
+    log_event(
+        EventType::OrganizationUpdated as i32,
+        &org_id,
+        org_id.clone(),
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     Ok(Json(org.to_json()))
     Ok(Json(org.to_json()))
 }
 }
 
 
@@ -290,6 +315,7 @@ async fn post_organization_collections(
     headers: ManagerHeadersLoose,
     headers: ManagerHeadersLoose,
     data: JsonUpcase<NewCollectionData>,
     data: JsonUpcase<NewCollectionData>,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: NewCollectionData = data.into_inner().data;
     let data: NewCollectionData = data.into_inner().data;
 
 
@@ -307,6 +333,17 @@ async fn post_organization_collections(
     let collection = Collection::new(org.uuid, data.Name);
     let collection = Collection::new(org.uuid, data.Name);
     collection.save(&mut conn).await?;
     collection.save(&mut conn).await?;
 
 
+    log_event(
+        EventType::CollectionCreated as i32,
+        &collection.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     for group in data.Groups {
     for group in data.Groups {
         CollectionGroup::new(collection.uuid.clone(), group.Id, group.ReadOnly, group.HidePasswords)
         CollectionGroup::new(collection.uuid.clone(), group.Id, group.ReadOnly, group.HidePasswords)
             .save(&mut conn)
             .save(&mut conn)
@@ -330,17 +367,19 @@ async fn put_organization_collection_update(
     headers: ManagerHeaders,
     headers: ManagerHeaders,
     data: JsonUpcase<NewCollectionData>,
     data: JsonUpcase<NewCollectionData>,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
-    post_organization_collection_update(org_id, col_id, headers, data, conn).await
+    post_organization_collection_update(org_id, col_id, headers, data, conn, ip).await
 }
 }
 
 
 #[post("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
 #[post("/organizations/<org_id>/collections/<col_id>", data = "<data>")]
 async fn post_organization_collection_update(
 async fn post_organization_collection_update(
     org_id: String,
     org_id: String,
     col_id: String,
     col_id: String,
-    _headers: ManagerHeaders,
+    headers: ManagerHeaders,
     data: JsonUpcase<NewCollectionData>,
     data: JsonUpcase<NewCollectionData>,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: NewCollectionData = data.into_inner().data;
     let data: NewCollectionData = data.into_inner().data;
 
 
@@ -361,6 +400,17 @@ async fn post_organization_collection_update(
     collection.name = data.Name;
     collection.name = data.Name;
     collection.save(&mut conn).await?;
     collection.save(&mut conn).await?;
 
 
+    log_event(
+        EventType::CollectionUpdated as i32,
+        &collection.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?;
     CollectionGroup::delete_all_by_collection(&col_id, &mut conn).await?;
 
 
     for group in data.Groups {
     for group in data.Groups {
@@ -415,13 +465,24 @@ async fn post_organization_collection_delete_user(
 async fn delete_organization_collection(
 async fn delete_organization_collection(
     org_id: String,
     org_id: String,
     col_id: String,
     col_id: String,
-    _headers: ManagerHeaders,
+    headers: ManagerHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     match Collection::find_by_uuid(&col_id, &mut conn).await {
     match Collection::find_by_uuid(&col_id, &mut conn).await {
         None => err!("Collection not found"),
         None => err!("Collection not found"),
         Some(collection) => {
         Some(collection) => {
             if collection.org_uuid == org_id {
             if collection.org_uuid == org_id {
+                log_event(
+                    EventType::CollectionDeleted as i32,
+                    &collection.uuid,
+                    org_id,
+                    headers.user.uuid.clone(),
+                    headers.device.atype,
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
                 collection.delete(&mut conn).await
                 collection.delete(&mut conn).await
             } else {
             } else {
                 err!("Collection and Organization id do not match")
                 err!("Collection and Organization id do not match")
@@ -444,8 +505,9 @@ async fn post_organization_collection_delete(
     headers: ManagerHeaders,
     headers: ManagerHeaders,
     _data: JsonUpcase<DeleteCollectionData>,
     _data: JsonUpcase<DeleteCollectionData>,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_organization_collection(org_id, col_id, headers, conn).await
+    delete_organization_collection(org_id, col_id, headers, conn, ip).await
 }
 }
 
 
 #[get("/organizations/<org_id>/collections/<coll_id>/details")]
 #[get("/organizations/<org_id>/collections/<coll_id>/details")]
@@ -632,6 +694,7 @@ async fn send_invite(
     data: JsonUpcase<InviteData>,
     data: JsonUpcase<InviteData>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: InviteData = data.into_inner().data;
     let data: InviteData = data.into_inner().data;
 
 
@@ -700,6 +763,17 @@ async fn send_invite(
 
 
         new_user.save(&mut conn).await?;
         new_user.save(&mut conn).await?;
 
 
+        log_event(
+            EventType::OrganizationUserInvited as i32,
+            &new_user.uuid,
+            org_id.clone(),
+            headers.user.uuid.clone(),
+            headers.device.atype,
+            &ip.ip,
+            &mut conn,
+        )
+        .await;
+
         if CONFIG.mail_enabled() {
         if CONFIG.mail_enabled() {
             let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await {
             let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await {
                 Some(org) => org.name,
                 Some(org) => org.name,
@@ -882,6 +956,7 @@ async fn bulk_confirm_invite(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
     let data = data.into_inner().data;
     let data = data.into_inner().data;
 
 
@@ -891,7 +966,7 @@ async fn bulk_confirm_invite(
             for invite in keys {
             for invite in keys {
                 let org_user_id = invite["Id"].as_str().unwrap_or_default();
                 let org_user_id = invite["Id"].as_str().unwrap_or_default();
                 let user_key = invite["Key"].as_str().unwrap_or_default();
                 let user_key = invite["Key"].as_str().unwrap_or_default();
-                let err_msg = match _confirm_invite(&org_id, org_user_id, user_key, &headers, &mut conn).await {
+                let err_msg = match _confirm_invite(&org_id, org_user_id, user_key, &headers, &mut conn, &ip).await {
                     Ok(_) => String::new(),
                     Ok(_) => String::new(),
                     Err(e) => format!("{:?}", e),
                     Err(e) => format!("{:?}", e),
                 };
                 };
@@ -922,10 +997,11 @@ async fn confirm_invite(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data = data.into_inner().data;
     let data = data.into_inner().data;
     let user_key = data["Key"].as_str().unwrap_or_default();
     let user_key = data["Key"].as_str().unwrap_or_default();
-    _confirm_invite(&org_id, &org_user_id, user_key, &headers, &mut conn).await
+    _confirm_invite(&org_id, &org_user_id, user_key, &headers, &mut conn, &ip).await
 }
 }
 
 
 async fn _confirm_invite(
 async fn _confirm_invite(
@@ -934,6 +1010,7 @@ async fn _confirm_invite(
     key: &str,
     key: &str,
     headers: &AdminHeaders,
     headers: &AdminHeaders,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     if key.is_empty() || org_user_id.is_empty() {
     if key.is_empty() || org_user_id.is_empty() {
         err!("Key or UserId is not set, unable to process request");
         err!("Key or UserId is not set, unable to process request");
@@ -969,6 +1046,17 @@ async fn _confirm_invite(
     user_to_confirm.status = UserOrgStatus::Confirmed as i32;
     user_to_confirm.status = UserOrgStatus::Confirmed as i32;
     user_to_confirm.akey = key.to_string();
     user_to_confirm.akey = key.to_string();
 
 
+    log_event(
+        EventType::OrganizationUserConfirmed as i32,
+        &user_to_confirm.uuid,
+        String::from(org_id),
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        conn,
+    )
+    .await;
+
     if CONFIG.mail_enabled() {
     if CONFIG.mail_enabled() {
         let org_name = match Organization::find_by_uuid(org_id, conn).await {
         let org_name = match Organization::find_by_uuid(org_id, conn).await {
             Some(org) => org.name,
             Some(org) => org.name,
@@ -1009,8 +1097,9 @@ async fn put_organization_user(
     data: JsonUpcase<EditUserData>,
     data: JsonUpcase<EditUserData>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    edit_user(org_id, org_user_id, data, headers, conn).await
+    edit_user(org_id, org_user_id, data, headers, conn, ip).await
 }
 }
 
 
 #[post("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
 #[post("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
@@ -1020,6 +1109,7 @@ async fn edit_user(
     data: JsonUpcase<EditUserData>,
     data: JsonUpcase<EditUserData>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: EditUserData = data.into_inner().data;
     let data: EditUserData = data.into_inner().data;
 
 
@@ -1095,6 +1185,17 @@ async fn edit_user(
         }
         }
     }
     }
 
 
+    log_event(
+        EventType::OrganizationUserUpdated as i32,
+        &user_to_edit.uuid,
+        org_id.clone(),
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     user_to_edit.save(&mut conn).await
     user_to_edit.save(&mut conn).await
 }
 }
 
 
@@ -1104,12 +1205,13 @@ async fn bulk_delete_user(
     data: JsonUpcase<OrgBulkIds>,
     data: JsonUpcase<OrgBulkIds>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
     let data: OrgBulkIds = data.into_inner().data;
     let data: OrgBulkIds = data.into_inner().data;
 
 
     let mut bulk_response = Vec::new();
     let mut bulk_response = Vec::new();
     for org_user_id in data.Ids {
     for org_user_id in data.Ids {
-        let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &mut conn).await {
+        let err_msg = match _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await {
             Ok(_) => String::new(),
             Ok(_) => String::new(),
             Err(e) => format!("{:?}", e),
             Err(e) => format!("{:?}", e),
         };
         };
@@ -1131,11 +1233,34 @@ async fn bulk_delete_user(
 }
 }
 
 
 #[delete("/organizations/<org_id>/users/<org_user_id>")]
 #[delete("/organizations/<org_id>/users/<org_user_id>")]
-async fn delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult {
-    _delete_user(&org_id, &org_user_id, &headers, &mut conn).await
+async fn delete_user(
+    org_id: String,
+    org_user_id: String,
+    headers: AdminHeaders,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
+    _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
+}
+
+#[post("/organizations/<org_id>/users/<org_user_id>/delete")]
+async fn post_delete_user(
+    org_id: String,
+    org_user_id: String,
+    headers: AdminHeaders,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
+    _delete_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
 }
 }
 
 
-async fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, conn: &mut DbConn) -> EmptyResult {
+async fn _delete_user(
+    org_id: &str,
+    org_user_id: &str,
+    headers: &AdminHeaders,
+    conn: &mut DbConn,
+    ip: &ClientIp,
+) -> EmptyResult {
     let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
     let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
         Some(user) => user,
         Some(user) => user,
         None => err!("User to delete isn't member of the organization"),
         None => err!("User to delete isn't member of the organization"),
@@ -1152,12 +1277,18 @@ async fn _delete_user(org_id: &str, org_user_id: &str, headers: &AdminHeaders, c
         }
         }
     }
     }
 
 
-    user_to_delete.delete(conn).await
-}
+    log_event(
+        EventType::OrganizationUserRemoved as i32,
+        &user_to_delete.uuid,
+        String::from(org_id),
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        conn,
+    )
+    .await;
 
 
-#[post("/organizations/<org_id>/users/<org_user_id>/delete")]
-async fn post_delete_user(org_id: String, org_user_id: String, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
-    delete_user(org_id, org_user_id, headers, conn).await
+    user_to_delete.delete(conn).await
 }
 }
 
 
 #[post("/organizations/<org_id>/users/public-keys", data = "<data>")]
 #[post("/organizations/<org_id>/users/public-keys", data = "<data>")]
@@ -1223,6 +1354,7 @@ async fn post_org_import(
     data: JsonUpcase<ImportData>,
     data: JsonUpcase<ImportData>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
     nt: Notify<'_>,
     nt: Notify<'_>,
 ) -> EmptyResult {
 ) -> EmptyResult {
     let data: ImportData = data.into_inner().data;
     let data: ImportData = data.into_inner().data;
@@ -1249,7 +1381,9 @@ async fn post_org_import(
     let mut ciphers = Vec::new();
     let mut ciphers = Vec::new();
     for cipher_data in data.Ciphers {
     for cipher_data in data.Ciphers {
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
+        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &ip, &nt, UpdateType::None)
+            .await
+            .ok();
         ciphers.push(cipher);
         ciphers.push(cipher);
     }
     }
 
 
@@ -1333,8 +1467,9 @@ async fn put_policy(
     org_id: String,
     org_id: String,
     pol_type: i32,
     pol_type: i32,
     data: Json<PolicyData>,
     data: Json<PolicyData>,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let data: PolicyData = data.into_inner();
     let data: PolicyData = data.into_inner();
 
 
@@ -1360,6 +1495,18 @@ async fn put_policy(
 
 
                     mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
                     mail::send_2fa_removed_from_org(&user.email, &org.name).await?;
                 }
                 }
+
+                log_event(
+                    EventType::OrganizationUserRemoved as i32,
+                    &member.uuid,
+                    org_id.clone(),
+                    headers.user.uuid.clone(),
+                    headers.device.atype,
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
+
                 member.delete(&mut conn).await?;
                 member.delete(&mut conn).await?;
             }
             }
         }
         }
@@ -1382,6 +1529,18 @@ async fn put_policy(
 
 
                     mail::send_single_org_removed_from_org(&user.email, &org.name).await?;
                     mail::send_single_org_removed_from_org(&user.email, &org.name).await?;
                 }
                 }
+
+                log_event(
+                    EventType::OrganizationUserRemoved as i32,
+                    &member.uuid,
+                    org_id.clone(),
+                    headers.user.uuid.clone(),
+                    headers.device.atype,
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
+
                 member.delete(&mut conn).await?;
                 member.delete(&mut conn).await?;
             }
             }
         }
         }
@@ -1389,13 +1548,24 @@ async fn put_policy(
 
 
     let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await {
     let mut policy = match OrgPolicy::find_by_org_and_type(&org_id, pol_type_enum, &mut conn).await {
         Some(p) => p,
         Some(p) => p,
-        None => OrgPolicy::new(org_id, pol_type_enum, "{}".to_string()),
+        None => OrgPolicy::new(org_id.clone(), pol_type_enum, "{}".to_string()),
     };
     };
 
 
     policy.enabled = data.enabled;
     policy.enabled = data.enabled;
     policy.data = serde_json::to_string(&data.data)?;
     policy.data = serde_json::to_string(&data.data)?;
     policy.save(&mut conn).await?;
     policy.save(&mut conn).await?;
 
 
+    log_event(
+        EventType::PolicyUpdated as i32,
+        &policy.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     Ok(Json(policy.to_json()))
     Ok(Json(policy.to_json()))
 }
 }
 
 
@@ -1467,7 +1637,13 @@ struct OrgImportData {
 }
 }
 
 
 #[post("/organizations/<org_id>/import", data = "<data>")]
 #[post("/organizations/<org_id>/import", data = "<data>")]
-async fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn import(
+    org_id: String,
+    data: JsonUpcase<OrgImportData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
     let data = data.into_inner().data;
     let data = data.into_inner().data;
 
 
     // TODO: Currently we aren't storing the externalIds anywhere, so we also don't have a way
     // TODO: Currently we aren't storing the externalIds anywhere, so we also don't have a way
@@ -1487,6 +1663,17 @@ async fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Header
             // If user is marked for deletion and it exists, delete it
             // If user is marked for deletion and it exists, delete it
             if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             if let Some(user_org) = UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
             {
             {
+                log_event(
+                    EventType::OrganizationUserRemoved as i32,
+                    &user_org.uuid,
+                    org_id.clone(),
+                    headers.user.uuid.clone(),
+                    headers.device.atype,
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
+
                 user_org.delete(&mut conn).await?;
                 user_org.delete(&mut conn).await?;
             }
             }
 
 
@@ -1506,6 +1693,17 @@ async fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Header
 
 
                 new_org_user.save(&mut conn).await?;
                 new_org_user.save(&mut conn).await?;
 
 
+                log_event(
+                    EventType::OrganizationUserInvited as i32,
+                    &new_org_user.uuid,
+                    org_id.clone(),
+                    headers.user.uuid.clone(),
+                    headers.device.atype,
+                    &ip.ip,
+                    &mut conn,
+                )
+                .await;
+
                 if CONFIG.mail_enabled() {
                 if CONFIG.mail_enabled() {
                     let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await {
                     let org_name = match Organization::find_by_uuid(&org_id, &mut conn).await {
                         Some(org) => org.name,
                         Some(org) => org.name,
@@ -1531,6 +1729,17 @@ async fn import(org_id: String, data: JsonUpcase<OrgImportData>, headers: Header
         for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User, &mut conn).await {
         for user_org in UserOrganization::find_by_org_and_type(&org_id, UserOrgType::User, &mut conn).await {
             if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.email) {
             if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.email) {
                 if !data.Users.iter().any(|u| u.Email == user_email) {
                 if !data.Users.iter().any(|u| u.Email == user_email) {
+                    log_event(
+                        EventType::OrganizationUserRemoved as i32,
+                        &user_org.uuid,
+                        org_id.clone(),
+                        headers.user.uuid.clone(),
+                        headers.device.atype,
+                        &ip.ip,
+                        &mut conn,
+                    )
+                    .await;
+
                     user_org.delete(&mut conn).await?;
                     user_org.delete(&mut conn).await?;
                 }
                 }
             }
             }
@@ -1547,8 +1756,9 @@ async fn deactivate_organization_user(
     org_user_id: String,
     org_user_id: String,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn).await
+    _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
 }
 }
 
 
 // Pre web-vault v2022.9.x endpoint
 // Pre web-vault v2022.9.x endpoint
@@ -1558,8 +1768,9 @@ async fn bulk_deactivate_organization_user(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
-    bulk_revoke_organization_user(org_id, data, headers, conn).await
+    bulk_revoke_organization_user(org_id, data, headers, conn, ip).await
 }
 }
 
 
 #[put("/organizations/<org_id>/users/<org_user_id>/revoke")]
 #[put("/organizations/<org_id>/users/<org_user_id>/revoke")]
@@ -1568,8 +1779,9 @@ async fn revoke_organization_user(
     org_user_id: String,
     org_user_id: String,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn).await
+    _revoke_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
 }
 }
 
 
 #[put("/organizations/<org_id>/users/revoke", data = "<data>")]
 #[put("/organizations/<org_id>/users/revoke", data = "<data>")]
@@ -1578,6 +1790,7 @@ async fn bulk_revoke_organization_user(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
     let data = data.into_inner().data;
     let data = data.into_inner().data;
 
 
@@ -1586,7 +1799,7 @@ async fn bulk_revoke_organization_user(
         Some(org_users) => {
         Some(org_users) => {
             for org_user_id in org_users {
             for org_user_id in org_users {
                 let org_user_id = org_user_id.as_str().unwrap_or_default();
                 let org_user_id = org_user_id.as_str().unwrap_or_default();
-                let err_msg = match _revoke_organization_user(&org_id, org_user_id, &headers, &mut conn).await {
+                let err_msg = match _revoke_organization_user(&org_id, org_user_id, &headers, &mut conn, &ip).await {
                     Ok(_) => String::new(),
                     Ok(_) => String::new(),
                     Err(e) => format!("{:?}", e),
                     Err(e) => format!("{:?}", e),
                 };
                 };
@@ -1615,6 +1828,7 @@ async fn _revoke_organization_user(
     org_user_id: &str,
     org_user_id: &str,
     headers: &AdminHeaders,
     headers: &AdminHeaders,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
     match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
         Some(mut user_org) if user_org.status > UserOrgStatus::Revoked as i32 => {
         Some(mut user_org) if user_org.status > UserOrgStatus::Revoked as i32 => {
@@ -1632,6 +1846,17 @@ async fn _revoke_organization_user(
 
 
             user_org.revoke();
             user_org.revoke();
             user_org.save(conn).await?;
             user_org.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRevoked as i32,
+                &user_org.uuid,
+                org_id.to_string(),
+                headers.user.uuid.clone(),
+                headers.device.atype,
+                &ip.ip,
+                conn,
+            )
+            .await;
         }
         }
         Some(_) => err!("User is already revoked"),
         Some(_) => err!("User is already revoked"),
         None => err!("User not found in organization"),
         None => err!("User not found in organization"),
@@ -1646,8 +1871,9 @@ async fn activate_organization_user(
     org_user_id: String,
     org_user_id: String,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn).await
+    _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
 }
 }
 
 
 // Pre web-vault v2022.9.x endpoint
 // Pre web-vault v2022.9.x endpoint
@@ -1657,8 +1883,9 @@ async fn bulk_activate_organization_user(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
-    bulk_restore_organization_user(org_id, data, headers, conn).await
+    bulk_restore_organization_user(org_id, data, headers, conn, ip).await
 }
 }
 
 
 #[put("/organizations/<org_id>/users/<org_user_id>/restore")]
 #[put("/organizations/<org_id>/users/<org_user_id>/restore")]
@@ -1667,8 +1894,9 @@ async fn restore_organization_user(
     org_user_id: String,
     org_user_id: String,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn).await
+    _restore_organization_user(&org_id, &org_user_id, &headers, &mut conn, &ip).await
 }
 }
 
 
 #[put("/organizations/<org_id>/users/restore", data = "<data>")]
 #[put("/organizations/<org_id>/users/restore", data = "<data>")]
@@ -1677,6 +1905,7 @@ async fn bulk_restore_organization_user(
     data: JsonUpcase<Value>,
     data: JsonUpcase<Value>,
     headers: AdminHeaders,
     headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> Json<Value> {
 ) -> Json<Value> {
     let data = data.into_inner().data;
     let data = data.into_inner().data;
 
 
@@ -1685,7 +1914,7 @@ async fn bulk_restore_organization_user(
         Some(org_users) => {
         Some(org_users) => {
             for org_user_id in org_users {
             for org_user_id in org_users {
                 let org_user_id = org_user_id.as_str().unwrap_or_default();
                 let org_user_id = org_user_id.as_str().unwrap_or_default();
-                let err_msg = match _restore_organization_user(&org_id, org_user_id, &headers, &mut conn).await {
+                let err_msg = match _restore_organization_user(&org_id, org_user_id, &headers, &mut conn, &ip).await {
                     Ok(_) => String::new(),
                     Ok(_) => String::new(),
                     Err(e) => format!("{:?}", e),
                     Err(e) => format!("{:?}", e),
                 };
                 };
@@ -1714,6 +1943,7 @@ async fn _restore_organization_user(
     org_user_id: &str,
     org_user_id: &str,
     headers: &AdminHeaders,
     headers: &AdminHeaders,
     conn: &mut DbConn,
     conn: &mut DbConn,
+    ip: &ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
     match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
         Some(mut user_org) if user_org.status < UserOrgStatus::Accepted as i32 => {
         Some(mut user_org) if user_org.status < UserOrgStatus::Accepted as i32 => {
@@ -1740,6 +1970,17 @@ async fn _restore_organization_user(
 
 
             user_org.restore();
             user_org.restore();
             user_org.save(conn).await?;
             user_org.save(conn).await?;
+
+            log_event(
+                EventType::OrganizationUserRestored as i32,
+                &user_org.uuid,
+                org_id.to_string(),
+                headers.user.uuid.clone(),
+                headers.device.atype,
+                &ip.ip,
+                conn,
+            )
+            .await;
         }
         }
         Some(_) => err!("User is already active"),
         Some(_) => err!("User is already active"),
         None => err!("User not found in organization"),
         None => err!("User not found in organization"),
@@ -1828,37 +2069,51 @@ impl SelectionReadOnly {
     }
     }
 }
 }
 
 
-#[post("/organizations/<_org_id>/groups/<group_id>", data = "<data>")]
+#[post("/organizations/<org_id>/groups/<group_id>", data = "<data>")]
 async fn post_group(
 async fn post_group(
-    _org_id: String,
+    org_id: String,
     group_id: String,
     group_id: String,
     data: JsonUpcase<GroupRequest>,
     data: JsonUpcase<GroupRequest>,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
-    put_group(_org_id, group_id, data, _headers, conn).await
+    put_group(org_id, group_id, data, headers, conn, ip).await
 }
 }
 
 
 #[post("/organizations/<org_id>/groups", data = "<data>")]
 #[post("/organizations/<org_id>/groups", data = "<data>")]
 async fn post_groups(
 async fn post_groups(
     org_id: String,
     org_id: String,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     data: JsonUpcase<GroupRequest>,
     data: JsonUpcase<GroupRequest>,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let group_request = data.into_inner().data;
     let group_request = data.into_inner().data;
     let group = group_request.to_group(&org_id)?;
     let group = group_request.to_group(&org_id)?;
 
 
+    log_event(
+        EventType::GroupCreated as i32,
+        &group.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     add_update_group(group, group_request.Collections, &mut conn).await
     add_update_group(group, group_request.Collections, &mut conn).await
 }
 }
 
 
-#[put("/organizations/<_org_id>/groups/<group_id>", data = "<data>")]
+#[put("/organizations/<org_id>/groups/<group_id>", data = "<data>")]
 async fn put_group(
 async fn put_group(
-    _org_id: String,
+    org_id: String,
     group_id: String,
     group_id: String,
     data: JsonUpcase<GroupRequest>,
     data: JsonUpcase<GroupRequest>,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> JsonResult {
 ) -> JsonResult {
     let group = match Group::find_by_uuid(&group_id, &mut conn).await {
     let group = match Group::find_by_uuid(&group_id, &mut conn).await {
         Some(group) => group,
         Some(group) => group,
@@ -1870,6 +2125,17 @@ async fn put_group(
 
 
     CollectionGroup::delete_all_by_group(&group_id, &mut conn).await?;
     CollectionGroup::delete_all_by_group(&group_id, &mut conn).await?;
 
 
+    log_event(
+        EventType::GroupUpdated as i32,
+        &updated_group.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     add_update_group(updated_group, group_request.Collections, &mut conn).await
     add_update_group(updated_group, group_request.Collections, &mut conn).await
 }
 }
 
 
@@ -1915,17 +2181,40 @@ async fn get_group_details(_org_id: String, group_id: String, _headers: AdminHea
 }
 }
 
 
 #[post("/organizations/<org_id>/groups/<group_id>/delete")]
 #[post("/organizations/<org_id>/groups/<group_id>/delete")]
-async fn post_delete_group(org_id: String, group_id: String, _headers: AdminHeaders, conn: DbConn) -> EmptyResult {
-    delete_group(org_id, group_id, _headers, conn).await
+async fn post_delete_group(
+    org_id: String,
+    group_id: String,
+    headers: AdminHeaders,
+    conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
+    delete_group(org_id, group_id, headers, conn, ip).await
 }
 }
 
 
-#[delete("/organizations/<_org_id>/groups/<group_id>")]
-async fn delete_group(_org_id: String, group_id: String, _headers: AdminHeaders, mut conn: DbConn) -> EmptyResult {
+#[delete("/organizations/<org_id>/groups/<group_id>")]
+async fn delete_group(
+    org_id: String,
+    group_id: String,
+    headers: AdminHeaders,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> EmptyResult {
     let group = match Group::find_by_uuid(&group_id, &mut conn).await {
     let group = match Group::find_by_uuid(&group_id, &mut conn).await {
         Some(group) => group,
         Some(group) => group,
         _ => err!("Group not found"),
         _ => err!("Group not found"),
     };
     };
 
 
+    log_event(
+        EventType::GroupDeleted as i32,
+        &group.uuid,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     group.delete(&mut conn).await
     group.delete(&mut conn).await
 }
 }
 
 
@@ -1955,13 +2244,14 @@ async fn get_group_users(_org_id: String, group_id: String, _headers: AdminHeade
     Ok(Json(json!(group_users)))
     Ok(Json(json!(group_users)))
 }
 }
 
 
-#[put("/organizations/<_org_id>/groups/<group_id>/users", data = "<data>")]
+#[put("/organizations/<org_id>/groups/<group_id>/users", data = "<data>")]
 async fn put_group_users(
 async fn put_group_users(
-    _org_id: String,
+    org_id: String,
     group_id: String,
     group_id: String,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     data: JsonVec<String>,
     data: JsonVec<String>,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
     match Group::find_by_uuid(&group_id, &mut conn).await {
     match Group::find_by_uuid(&group_id, &mut conn).await {
         Some(_) => { /* Do nothing */ }
         Some(_) => { /* Do nothing */ }
@@ -1972,8 +2262,19 @@ async fn put_group_users(
 
 
     let assigned_user_ids = data.into_inner();
     let assigned_user_ids = data.into_inner();
     for assigned_user_id in assigned_user_ids {
     for assigned_user_id in assigned_user_ids {
-        let mut user_entry = GroupUser::new(group_id.clone(), assigned_user_id);
+        let mut user_entry = GroupUser::new(group_id.clone(), assigned_user_id.clone());
         user_entry.save(&mut conn).await?;
         user_entry.save(&mut conn).await?;
+
+        log_event(
+            EventType::OrganizationUserUpdatedGroups as i32,
+            &assigned_user_id,
+            org_id.clone(),
+            headers.user.uuid.clone(),
+            headers.device.atype,
+            &ip.ip,
+            &mut conn,
+        )
+        .await;
     }
     }
 
 
     Ok(())
     Ok(())
@@ -1998,61 +2299,76 @@ struct OrganizationUserUpdateGroupsRequest {
     GroupIds: Vec<String>,
     GroupIds: Vec<String>,
 }
 }
 
 
-#[post("/organizations/<_org_id>/users/<user_id>/groups", data = "<data>")]
+#[post("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")]
 async fn post_user_groups(
 async fn post_user_groups(
-    _org_id: String,
-    user_id: String,
+    org_id: String,
+    org_user_id: String,
     data: JsonUpcase<OrganizationUserUpdateGroupsRequest>,
     data: JsonUpcase<OrganizationUserUpdateGroupsRequest>,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    put_user_groups(_org_id, user_id, data, _headers, conn).await
+    put_user_groups(org_id, org_user_id, data, headers, conn, ip).await
 }
 }
 
 
-#[put("/organizations/<_org_id>/users/<user_id>/groups", data = "<data>")]
+#[put("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")]
 async fn put_user_groups(
 async fn put_user_groups(
-    _org_id: String,
-    user_id: String,
+    org_id: String,
+    org_user_id: String,
     data: JsonUpcase<OrganizationUserUpdateGroupsRequest>,
     data: JsonUpcase<OrganizationUserUpdateGroupsRequest>,
-    _headers: AdminHeaders,
+    headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    match UserOrganization::find_by_uuid(&user_id, &mut conn).await {
+    match UserOrganization::find_by_uuid(&org_user_id, &mut conn).await {
         Some(_) => { /* Do nothing */ }
         Some(_) => { /* Do nothing */ }
         _ => err!("User could not be found!"),
         _ => err!("User could not be found!"),
     };
     };
 
 
-    GroupUser::delete_all_by_user(&user_id, &mut conn).await?;
+    GroupUser::delete_all_by_user(&org_user_id, &mut conn).await?;
 
 
     let assigned_group_ids = data.into_inner().data;
     let assigned_group_ids = data.into_inner().data;
     for assigned_group_id in assigned_group_ids.GroupIds {
     for assigned_group_id in assigned_group_ids.GroupIds {
-        let mut group_user = GroupUser::new(assigned_group_id.clone(), user_id.clone());
+        let mut group_user = GroupUser::new(assigned_group_id.clone(), org_user_id.clone());
         group_user.save(&mut conn).await?;
         group_user.save(&mut conn).await?;
     }
     }
 
 
+    log_event(
+        EventType::OrganizationUserUpdatedGroups as i32,
+        &org_user_id,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
     Ok(())
     Ok(())
 }
 }
 
 
-#[post("/organizations/<org_id>/groups/<group_id>/delete-user/<user_id>")]
+#[post("/organizations/<org_id>/groups/<group_id>/delete-user/<org_user_id>")]
 async fn post_delete_group_user(
 async fn post_delete_group_user(
     org_id: String,
     org_id: String,
     group_id: String,
     group_id: String,
-    user_id: String,
+    org_user_id: String,
     headers: AdminHeaders,
     headers: AdminHeaders,
     conn: DbConn,
     conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    delete_group_user(org_id, group_id, user_id, headers, conn).await
+    delete_group_user(org_id, group_id, org_user_id, headers, conn, ip).await
 }
 }
 
 
-#[delete("/organizations/<_org_id>/groups/<group_id>/users/<user_id>")]
+#[delete("/organizations/<org_id>/groups/<group_id>/users/<org_user_id>")]
 async fn delete_group_user(
 async fn delete_group_user(
-    _org_id: String,
+    org_id: String,
     group_id: String,
     group_id: String,
-    user_id: String,
-    _headers: AdminHeaders,
+    org_user_id: String,
+    headers: AdminHeaders,
     mut conn: DbConn,
     mut conn: DbConn,
+    ip: ClientIp,
 ) -> EmptyResult {
 ) -> EmptyResult {
-    match UserOrganization::find_by_uuid(&user_id, &mut conn).await {
+    match UserOrganization::find_by_uuid(&org_user_id, &mut conn).await {
         Some(_) => { /* Do nothing */ }
         Some(_) => { /* Do nothing */ }
         _ => err!("User could not be found!"),
         _ => err!("User could not be found!"),
     };
     };
@@ -2062,7 +2378,18 @@ async fn delete_group_user(
         _ => err!("Group could not be found!"),
         _ => err!("Group could not be found!"),
     };
     };
 
 
-    GroupUser::delete_by_group_id_and_user_id(&group_id, &user_id, &mut conn).await
+    log_event(
+        EventType::OrganizationUserUpdatedGroups as i32,
+        &org_user_id,
+        org_id,
+        headers.user.uuid.clone(),
+        headers.device.atype,
+        &ip.ip,
+        &mut conn,
+    )
+    .await;
+
+    GroupUser::delete_by_group_id_and_user_id(&group_id, &org_user_id, &mut conn).await
 }
 }
 
 
 // This is a new function active since the v2022.9.x clients.
 // This is a new function active since the v2022.9.x clients.

+ 17 - 4
src/api/core/two_factor/authenticator.rs

@@ -4,12 +4,13 @@ use rocket::Route;
 
 
 use crate::{
 use crate::{
     api::{
     api::{
-        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase,
+        NumberOrString, PasswordData,
     },
     },
     auth::{ClientIp, Headers},
     auth::{ClientIp, Headers},
     crypto,
     crypto,
     db::{
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
         DbConn,
     },
     },
 };
 };
@@ -85,6 +86,8 @@ async fn activate_authenticator(
 
 
     _generate_recover_code(&mut user, &mut conn).await;
     _generate_recover_code(&mut user, &mut conn).await;
 
 
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     Ok(Json(json!({
     Ok(Json(json!({
         "Enabled": true,
         "Enabled": true,
         "Key": key,
         "Key": key,
@@ -167,10 +170,20 @@ pub async fn validate_totp_code(
             return Ok(());
             return Ok(());
         } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
         } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
             warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
             warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps);
-            err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
+            err!(
+                format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
+                ErrorEvent {
+                    event: EventType::UserFailedLogIn2fa
+                }
+            );
         }
         }
     }
     }
 
 
     // Else no valid code received, deny access
     // Else no valid code received, deny access
-    err!(format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip));
+    err!(
+        format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip),
+        ErrorEvent {
+            event: EventType::UserFailedLogIn2fa
+        }
+    );
 }
 }

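The TOTP validation above rejects a code whose time step is not newer than the last accepted one, which is what blocks replays. A minimal sketch of that comparison, assuming the conventional 30-second TOTP step (the step width itself is not shown in this hunk):

// Sketch only, not part of this patch: replay rejection for TOTP codes,
// assuming 30-second steps. `last_used_step` plays the role of `twofactor.last_used`.
fn is_replayed_totp(current_unix_time: i64, last_used_step: i64) -> bool {
    let time_step = current_unix_time / 30;
    // A code is rejected unless its step is strictly newer than the last accepted one.
    time_step <= last_used_step
}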
+ 23 - 8
src/api/core/two_factor/duo.rs

@@ -4,11 +4,14 @@ use rocket::serde::json::Json;
 use rocket::Route;
 use rocket::Route;
 
 
 use crate::{
 use crate::{
-    api::{core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, PasswordData},
-    auth::Headers,
+    api::{
+        core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase,
+        PasswordData,
+    },
+    auth::{ClientIp, Headers},
     crypto,
     crypto,
     db::{
     db::{
-        models::{TwoFactor, TwoFactorType, User},
+        models::{EventType, TwoFactor, TwoFactorType, User},
         DbConn,
         DbConn,
     },
     },
     error::MapResult,
     error::MapResult,
@@ -152,7 +155,7 @@ fn check_duo_fields_custom(data: &EnableDuoData) -> bool {
 }
 }
 
 
 #[post("/two-factor/duo", data = "<data>")]
 #[post("/two-factor/duo", data = "<data>")]
-async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
     let data: EnableDuoData = data.into_inner().data;
     let data: EnableDuoData = data.into_inner().data;
     let mut user = headers.user;
     let mut user = headers.user;
 
 
@@ -175,6 +178,8 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
 
 
     _generate_recover_code(&mut user, &mut conn).await;
     _generate_recover_code(&mut user, &mut conn).await;
 
 
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     Ok(Json(json!({
     Ok(Json(json!({
         "Enabled": true,
         "Enabled": true,
         "Host": data.host,
         "Host": data.host,
@@ -185,8 +190,8 @@ async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut con
 }
 }
 
 
 #[put("/two-factor/duo", data = "<data>")]
 #[put("/two-factor/duo", data = "<data>")]
-async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_duo(data, headers, conn).await
+async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn, ip: ClientIp) -> JsonResult {
+    activate_duo(data, headers, conn, ip).await
 }
 }
 
 
 async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
 async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult {
@@ -282,7 +287,12 @@ pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn)
 
 
     let split: Vec<&str> = response.split(':').collect();
     let split: Vec<&str> = response.split(':').collect();
     if split.len() != 2 {
     if split.len() != 2 {
-        err!("Invalid response length");
+        err!(
+            "Invalid response length",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        );
     }
     }
 
 
     let auth_sig = split[0];
     let auth_sig = split[0];
@@ -296,7 +306,12 @@ pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn)
     let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
     let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?;
 
 
     if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
     if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) {
-        err!("Error validating duo authentication")
+        err!(
+            "Error validating duo authentication",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        )
     }
     }
 
 
     Ok(())
     Ok(())

+ 29 - 8
src/api/core/two_factor/email.rs

@@ -3,11 +3,14 @@ use rocket::serde::json::Json;
 use rocket::Route;
 use rocket::Route;
 
 
 use crate::{
 use crate::{
-    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
-    auth::Headers,
+    api::{
+        core::{log_user_event, two_factor::_generate_recover_code},
+        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+    },
+    auth::{ClientIp, Headers},
     crypto,
     crypto,
     db::{
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
         DbConn,
     },
     },
     error::{Error, MapResult},
     error::{Error, MapResult},
@@ -147,7 +150,7 @@ struct EmailData {
 
 
 /// Verify email belongs to user and can be used for 2FA email codes.
 /// Verify email belongs to user and can be used for 2FA email codes.
 #[put("/two-factor/email", data = "<data>")]
 #[put("/two-factor/email", data = "<data>")]
-async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
     let data: EmailData = data.into_inner().data;
     let data: EmailData = data.into_inner().data;
     let mut user = headers.user;
     let mut user = headers.user;
 
 
@@ -177,6 +180,8 @@ async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn)
 
 
     _generate_recover_code(&mut user, &mut conn).await;
     _generate_recover_code(&mut user, &mut conn).await;
 
 
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     Ok(Json(json!({
     Ok(Json(json!({
         "Email": email_data.email,
         "Email": email_data.email,
         "Enabled": "true",
         "Enabled": "true",
@@ -192,7 +197,12 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
         .map_res("Two factor not found")?;
         .map_res("Two factor not found")?;
     let issued_token = match &email_data.last_token {
     let issued_token = match &email_data.last_token {
         Some(t) => t,
         Some(t) => t,
-        _ => err!("No token available"),
+        _ => err!(
+            "No token available",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        ),
     };
     };
 
 
     if !crypto::ct_eq(issued_token, token) {
     if !crypto::ct_eq(issued_token, token) {
@@ -203,21 +213,32 @@ pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, c
         twofactor.data = email_data.to_json();
         twofactor.data = email_data.to_json();
         twofactor.save(conn).await?;
         twofactor.save(conn).await?;
 
 
-        err!("Token is invalid")
+        err!(
+            "Token is invalid",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        )
     }
     }
 
 
     email_data.reset_token();
     email_data.reset_token();
     twofactor.data = email_data.to_json();
     twofactor.data = email_data.to_json();
     twofactor.save(conn).await?;
     twofactor.save(conn).await?;
 
 
-    let date = NaiveDateTime::from_timestamp(email_data.token_sent, 0);
+    let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid.");
     let max_time = CONFIG.email_expiration_time() as i64;
     let max_time = CONFIG.email_expiration_time() as i64;
     if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
     if date + Duration::seconds(max_time) < Utc::now().naive_utc() {
-        err!("Token has expired")
+        err!(
+            "Token has expired",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        )
     }
     }
 
 
     Ok(())
     Ok(())
 }
 }
+
 /// Data stored in the TwoFactor table in the db
 /// Data stored in the TwoFactor table in the db
 #[derive(Serialize, Deserialize)]
 #[derive(Serialize, Deserialize)]
 pub struct EmailTokenData {
 pub struct EmailTokenData {

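The expiry check in validate_email_code_str compares the token's send time plus CONFIG.email_expiration_time() seconds against the current UTC time. A self-contained sketch of that comparison (hypothetical helper name), using the same chrono calls as the hunk above:

// Sketch only, not part of this patch: email 2FA token expiry check.
use chrono::{Duration, NaiveDateTime, Utc};

fn email_token_expired(token_sent_unix: i64, max_age_secs: i64) -> bool {
    let sent = NaiveDateTime::from_timestamp_opt(token_sent_unix, 0)
        .expect("Email token timestamp invalid.");
    sent + Duration::seconds(max_age_secs) < Utc::now().naive_utc()
}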
+ 19 - 6
src/api/core/two_factor/mod.rs

@@ -5,8 +5,8 @@ use rocket::Route;
 use serde_json::Value;
 use serde_json::Value;
 
 
 use crate::{
 use crate::{
-    api::{JsonResult, JsonUpcase, NumberOrString, PasswordData},
-    auth::Headers,
+    api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData},
+    auth::{ClientIp, Headers},
     crypto,
     crypto,
     db::{models::*, DbConn, DbPool},
     db::{models::*, DbConn, DbPool},
     mail, CONFIG,
     mail, CONFIG,
@@ -73,7 +73,7 @@ struct RecoverTwoFactor {
 }
 }
 
 
 #[post("/two-factor/recover", data = "<data>")]
 #[post("/two-factor/recover", data = "<data>")]
-async fn recover(data: JsonUpcase<RecoverTwoFactor>, mut conn: DbConn) -> JsonResult {
+async fn recover(data: JsonUpcase<RecoverTwoFactor>, headers: Headers, mut conn: DbConn, ip: ClientIp) -> JsonResult {
     let data: RecoverTwoFactor = data.into_inner().data;
     let data: RecoverTwoFactor = data.into_inner().data;
 
 
     use crate::db::models::User;
     use crate::db::models::User;
@@ -97,6 +97,8 @@ async fn recover(data: JsonUpcase<RecoverTwoFactor>, mut conn: DbConn) -> JsonRe
     // Remove all twofactors from the user
     // Remove all twofactors from the user
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
     TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
 
 
+    log_user_event(EventType::UserRecovered2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     // Remove the recovery code, not needed without twofactors
     // Remove the recovery code, not needed without twofactors
     user.totp_recover = None;
     user.totp_recover = None;
     user.save(&mut conn).await?;
     user.save(&mut conn).await?;
@@ -119,7 +121,12 @@ struct DisableTwoFactorData {
 }
 }
 
 
 #[post("/two-factor/disable", data = "<data>")]
 #[post("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn disable_twofactor(
+    data: JsonUpcase<DisableTwoFactorData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
     let data: DisableTwoFactorData = data.into_inner().data;
     let data: DisableTwoFactorData = data.into_inner().data;
     let password_hash = data.MasterPasswordHash;
     let password_hash = data.MasterPasswordHash;
     let user = headers.user;
     let user = headers.user;
@@ -132,6 +139,7 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
 
 
     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
     if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
         twofactor.delete(&mut conn).await?;
         twofactor.delete(&mut conn).await?;
+        log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
     }
     }
 
 
     let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
     let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty();
@@ -160,8 +168,13 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head
 }
 }
 
 
 #[put("/two-factor/disable", data = "<data>")]
 #[put("/two-factor/disable", data = "<data>")]
-async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult {
-    disable_twofactor(data, headers, conn).await
+async fn disable_twofactor_put(
+    data: JsonUpcase<DisableTwoFactorData>,
+    headers: Headers,
+    conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
+    disable_twofactor(data, headers, conn, ip).await
 }
 }
 
 
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {
 pub async fn send_incomplete_2fa_notifications(pool: DbPool) {

+ 31 - 8
src/api/core/two_factor/webauthn.rs

@@ -6,11 +6,12 @@ use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState,
 
 
 use crate::{
 use crate::{
     api::{
     api::{
-        core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
+        core::{log_user_event, two_factor::_generate_recover_code},
+        EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData,
     },
     },
-    auth::Headers,
+    auth::{ClientIp, Headers},
     db::{
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
         DbConn,
     },
     },
     error::Error,
     error::Error,
@@ -241,7 +242,12 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
 }
 }
 
 
 #[post("/two-factor/webauthn", data = "<data>")]
 #[post("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn activate_webauthn(
+    data: JsonUpcase<EnableWebauthnData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
     let data: EnableWebauthnData = data.into_inner().data;
     let data: EnableWebauthnData = data.into_inner().data;
     let mut user = headers.user;
     let mut user = headers.user;
 
 
@@ -280,6 +286,8 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
         .await?;
         .await?;
     _generate_recover_code(&mut user, &mut conn).await;
     _generate_recover_code(&mut user, &mut conn).await;
 
 
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect();
     Ok(Json(json!({
     Ok(Json(json!({
         "Enabled": true,
         "Enabled": true,
@@ -289,8 +297,13 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header
 }
 }
 
 
 #[put("/two-factor/webauthn", data = "<data>")]
 #[put("/two-factor/webauthn", data = "<data>")]
-async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_webauthn(data, headers, conn).await
+async fn activate_webauthn_put(
+    data: JsonUpcase<EnableWebauthnData>,
+    headers: Headers,
+    conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
+    activate_webauthn(data, headers, conn, ip).await
 }
 }
 
 
 #[derive(Deserialize, Debug)]
 #[derive(Deserialize, Debug)]
@@ -391,7 +404,12 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
             tf.delete(conn).await?;
             tf.delete(conn).await?;
             state
             state
         }
         }
-        None => err!("Can't recover login challenge"),
+        None => err!(
+            "Can't recover login challenge",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        ),
     };
     };
 
 
     let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
     let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
@@ -414,5 +432,10 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut
         }
         }
     }
     }
 
 
-    err!("Credential not present")
+    err!(
+        "Credential not present",
+        ErrorEvent {
+            event: EventType::UserFailedLogIn2fa
+        }
+    )
 }
 }

+ 21 - 6
src/api/core/two_factor/yubikey.rs

@@ -4,10 +4,13 @@ use serde_json::Value;
 use yubico::{config::Config, verify};
 use yubico::{config::Config, verify};
 
 
 use crate::{
 use crate::{
-    api::{core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, PasswordData},
-    auth::Headers,
+    api::{
+        core::{log_user_event, two_factor::_generate_recover_code},
+        EmptyResult, JsonResult, JsonUpcase, PasswordData,
+    },
+    auth::{ClientIp, Headers},
     db::{
     db::{
-        models::{TwoFactor, TwoFactorType},
+        models::{EventType, TwoFactor, TwoFactorType},
         DbConn,
         DbConn,
     },
     },
     error::{Error, MapResult},
     error::{Error, MapResult},
@@ -113,7 +116,12 @@ async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut
 }
 }
 
 
 #[post("/two-factor/yubikey", data = "<data>")]
 #[post("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn activate_yubikey(
+    data: JsonUpcase<EnableYubikeyData>,
+    headers: Headers,
+    mut conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
     let data: EnableYubikeyData = data.into_inner().data;
     let data: EnableYubikeyData = data.into_inner().data;
     let mut user = headers.user;
     let mut user = headers.user;
 
 
@@ -159,6 +167,8 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
 
 
     _generate_recover_code(&mut user, &mut conn).await;
     _generate_recover_code(&mut user, &mut conn).await;
 
 
+    log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &ip.ip, &mut conn).await;
+
     let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
     let mut result = jsonify_yubikeys(yubikey_metadata.Keys);
 
 
     result["Enabled"] = Value::Bool(true);
     result["Enabled"] = Value::Bool(true);
@@ -169,8 +179,13 @@ async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers,
 }
 }
 
 
 #[put("/two-factor/yubikey", data = "<data>")]
 #[put("/two-factor/yubikey", data = "<data>")]
-async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult {
-    activate_yubikey(data, headers, conn).await
+async fn activate_yubikey_put(
+    data: JsonUpcase<EnableYubikeyData>,
+    headers: Headers,
+    conn: DbConn,
+    ip: ClientIp,
+) -> JsonResult {
+    activate_yubikey(data, headers, conn, ip).await
 }
 }
 
 
 pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {
 pub fn validate_yubikey_login(response: &str, twofactor_data: &str) -> EmptyResult {

+ 115 - 32
src/api/identity.rs

@@ -10,6 +10,7 @@ use serde_json::Value;
 use crate::{
 use crate::{
     api::{
     api::{
         core::accounts::{PreloginData, RegisterData, _prelogin, _register},
         core::accounts::{PreloginData, RegisterData, _prelogin, _register},
+        core::log_user_event,
         core::two_factor::{duo, email, email::EmailTokenData, yubikey},
         core::two_factor::{duo, email, email::EmailTokenData, yubikey},
         ApiResult, EmptyResult, JsonResult, JsonUpcase,
         ApiResult, EmptyResult, JsonResult, JsonUpcase,
     },
     },
@@ -24,13 +25,16 @@ pub fn routes() -> Vec<Route> {
 }
 }
 
 
 #[post("/connect/token", data = "<data>")]
 #[post("/connect/token", data = "<data>")]
-async fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResult {
+async fn login(data: Form<ConnectData>, mut conn: DbConn, ip: ClientIp) -> JsonResult {
     let data: ConnectData = data.into_inner();
     let data: ConnectData = data.into_inner();
 
 
-    match data.grant_type.as_ref() {
+    let mut user_uuid: Option<String> = None;
+    let device_type = data.device_type.clone();
+
+    let login_result = match data.grant_type.as_ref() {
         "refresh_token" => {
         "refresh_token" => {
             _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
             _check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
-            _refresh_login(data, conn).await
+            _refresh_login(data, &mut conn).await
         }
         }
         "password" => {
         "password" => {
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
@@ -42,34 +46,51 @@ async fn login(data: Form<ConnectData>, conn: DbConn, ip: ClientIp) -> JsonResul
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_name, "device_name cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;
             _check_is_some(&data.device_type, "device_type cannot be blank")?;
 
 
-            _password_login(data, conn, &ip).await
+            _password_login(data, &mut user_uuid, &mut conn, &ip).await
         }
         }
         "client_credentials" => {
         "client_credentials" => {
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
             _check_is_some(&data.client_id, "client_id cannot be blank")?;
             _check_is_some(&data.client_secret, "client_secret cannot be blank")?;
             _check_is_some(&data.client_secret, "client_secret cannot be blank")?;
             _check_is_some(&data.scope, "scope cannot be blank")?;
             _check_is_some(&data.scope, "scope cannot be blank")?;
 
 
-            _api_key_login(data, conn, &ip).await
+            _api_key_login(data, &mut user_uuid, &mut conn, &ip).await
         }
         }
         t => err!("Invalid type", t),
         t => err!("Invalid type", t),
+    };
+
+    if let Some(user_uuid) = user_uuid {
+        // When unknown or unable to parse, return 14, which is 'Unknown Browser'
+        let device_type = util::try_parse_string(device_type).unwrap_or(14);
+        match &login_result {
+            Ok(_) => {
+                log_user_event(EventType::UserLoggedIn as i32, &user_uuid, device_type, &ip.ip, &mut conn).await;
+            }
+            Err(e) => {
+                if let Some(ev) = e.get_event() {
+                    log_user_event(ev.event as i32, &user_uuid, device_type, &ip.ip, &mut conn).await
+                }
+            }
+        }
     }
     }
+
+    login_result
 }
 }
 
 
-async fn _refresh_login(data: ConnectData, mut conn: DbConn) -> JsonResult {
+async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
     // Extract token
     // Extract token
     let token = data.refresh_token.unwrap();
     let token = data.refresh_token.unwrap();
 
 
     // Get device by refresh token
     // Get device by refresh token
-    let mut device = Device::find_by_refresh_token(&token, &mut conn).await.map_res("Invalid refresh token")?;
+    let mut device = Device::find_by_refresh_token(&token, conn).await.map_res("Invalid refresh token")?;
 
 
     let scope = "api offline_access";
     let scope = "api offline_access";
     let scope_vec = vec!["api".into(), "offline_access".into()];
     let scope_vec = vec!["api".into(), "offline_access".into()];
 
 
     // Common
     // Common
-    let user = User::find_by_uuid(&device.user_uuid, &mut conn).await.unwrap();
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await;
+    let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
+    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
-    device.save(&mut conn).await?;
+    device.save(conn).await?;
 
 
     Ok(Json(json!({
     Ok(Json(json!({
         "access_token": access_token,
         "access_token": access_token,
@@ -87,7 +108,12 @@ async fn _refresh_login(data: ConnectData, mut conn: DbConn) -> JsonResult {
     })))
     })))
 }
 }
 
 
-async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> JsonResult {
+async fn _password_login(
+    data: ConnectData,
+    user_uuid: &mut Option<String>,
+    conn: &mut DbConn,
+    ip: &ClientIp,
+) -> JsonResult {
     // Validate scope
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
     let scope = data.scope.as_ref().unwrap();
     if scope != "api offline_access" {
     if scope != "api offline_access" {
@@ -100,20 +126,35 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) ->
 
 
     // Get the user
     // Get the user
     let username = data.username.as_ref().unwrap().trim();
     let username = data.username.as_ref().unwrap().trim();
-    let user = match User::find_by_mail(username, &mut conn).await {
+    let user = match User::find_by_mail(username, conn).await {
         Some(user) => user,
         Some(user) => user,
         None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
         None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)),
     };
     };
 
 
+    // Set the user_uuid here so it can be passed back and used for event logging.
+    *user_uuid = Some(user.uuid.clone());
+
     // Check password
     // Check password
     let password = data.password.as_ref().unwrap();
     let password = data.password.as_ref().unwrap();
     if !user.check_valid_password(password) {
     if !user.check_valid_password(password) {
-        err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username))
+        err!(
+            "Username or password is incorrect. Try again",
+            format!("IP: {}. Username: {}.", ip.ip, username),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn,
+            }
+        )
     }
     }
 
 
     // Check if the user is disabled
     // Check if the user is disabled
     if !user.enabled {
     if !user.enabled {
-        err!("This user has been disabled", format!("IP: {}. Username: {}.", ip.ip, username))
+        err!(
+            "This user has been disabled",
+            format!("IP: {}. Username: {}.", ip.ip, username),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
     }
     }
 
 
     let now = Utc::now().naive_utc();
     let now = Utc::now().naive_utc();
@@ -131,7 +172,7 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) ->
                 user.last_verifying_at = Some(now);
                 user.last_verifying_at = Some(now);
                 user.login_verify_count += 1;
                 user.login_verify_count += 1;
 
 
-                if let Err(e) = user.save(&mut conn).await {
+                if let Err(e) = user.save(conn).await {
                     error!("Error updating user: {:#?}", e);
                     error!("Error updating user: {:#?}", e);
                 }
                 }
 
 
@@ -142,27 +183,38 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) ->
         }
         }
 
 
         // We still want the login to fail until they actually verified the email address
         // We still want the login to fail until they actually verified the email address
-        err!("Please verify your email before trying again.", format!("IP: {}. Username: {}.", ip.ip, username))
+        err!(
+            "Please verify your email before trying again.",
+            format!("IP: {}. Username: {}.", ip.ip, username),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
     }
     }
 
 
-    let (mut device, new_device) = get_device(&data, &mut conn, &user).await;
+    let (mut device, new_device) = get_device(&data, conn, &user).await;
 
 
-    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, &mut conn).await?;
+    let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
 
 
     if CONFIG.mail_enabled() && new_device {
     if CONFIG.mail_enabled() && new_device {
         if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
         if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await {
             error!("Error sending new device email: {:#?}", e);
             error!("Error sending new device email: {:#?}", e);
 
 
             if CONFIG.require_device_email() {
             if CONFIG.require_device_email() {
-                err!("Could not send login notification email. Please contact your administrator.")
+                err!(
+                    "Could not send login notification email. Please contact your administrator.",
+                    ErrorEvent {
+                        event: EventType::UserFailedLogIn
+                    }
+                )
             }
             }
         }
         }
     }
     }
 
 
     // Common
     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await;
+    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
-    device.save(&mut conn).await?;
+    device.save(conn).await?;
 
 
     let mut result = json!({
     let mut result = json!({
         "access_token": access_token,
         "access_token": access_token,
@@ -188,7 +240,12 @@ async fn _password_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) ->
     Ok(Json(result))
     Ok(Json(result))
 }
 }
 
 
-async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> JsonResult {
+async fn _api_key_login(
+    data: ConnectData,
+    user_uuid: &mut Option<String>,
+    conn: &mut DbConn,
+    ip: &ClientIp,
+) -> JsonResult {
     // Validate scope
     // Validate scope
     let scope = data.scope.as_ref().unwrap();
     let scope = data.scope.as_ref().unwrap();
     if scope != "api" {
     if scope != "api" {
@@ -201,27 +258,42 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J
 
 
     // Get the user via the client_id
     // Get the user via the client_id
     let client_id = data.client_id.as_ref().unwrap();
     let client_id = data.client_id.as_ref().unwrap();
-    let user_uuid = match client_id.strip_prefix("user.") {
+    let client_user_uuid = match client_id.strip_prefix("user.") {
         Some(uuid) => uuid,
         Some(uuid) => uuid,
         None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
         None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
     };
     };
-    let user = match User::find_by_uuid(user_uuid, &mut conn).await {
+    let user = match User::find_by_uuid(client_user_uuid, conn).await {
         Some(user) => user,
         Some(user) => user,
         None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
         None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
     };
     };
 
 
+    // Set the user_uuid here so it can be passed back and used for event logging.
+    *user_uuid = Some(user.uuid.clone());
+
     // Check if the user is disabled
     // Check if the user is disabled
     if !user.enabled {
     if !user.enabled {
-        err!("This user has been disabled (API key login)", format!("IP: {}. Username: {}.", ip.ip, user.email))
+        err!(
+            "This user has been disabled (API key login)",
+            format!("IP: {}. Username: {}.", ip.ip, user.email),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
     }
     }
 
 
     // Check API key. Note that API key logins bypass 2FA.
     // Check API key. Note that API key logins bypass 2FA.
     let client_secret = data.client_secret.as_ref().unwrap();
     let client_secret = data.client_secret.as_ref().unwrap();
     if !user.check_valid_api_key(client_secret) {
     if !user.check_valid_api_key(client_secret) {
-        err!("Incorrect client_secret", format!("IP: {}. Username: {}.", ip.ip, user.email))
+        err!(
+            "Incorrect client_secret",
+            format!("IP: {}. Username: {}.", ip.ip, user.email),
+            ErrorEvent {
+                event: EventType::UserFailedLogIn
+            }
+        )
     }
     }
 
 
-    let (mut device, new_device) = get_device(&data, &mut conn, &user).await;
+    let (mut device, new_device) = get_device(&data, conn, &user).await;
 
 
     if CONFIG.mail_enabled() && new_device {
     if CONFIG.mail_enabled() && new_device {
         let now = Utc::now().naive_utc();
         let now = Utc::now().naive_utc();
@@ -229,15 +301,20 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J
             error!("Error sending new device email: {:#?}", e);
             error!("Error sending new device email: {:#?}", e);
 
 
             if CONFIG.require_device_email() {
             if CONFIG.require_device_email() {
-                err!("Could not send login notification email. Please contact your administrator.")
+                err!(
+                    "Could not send login notification email. Please contact your administrator.",
+                    ErrorEvent {
+                        event: EventType::UserFailedLogIn
+                    }
+                )
             }
             }
         }
         }
     }
     }
 
 
     // Common
     // Common
-    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, &mut conn).await;
+    let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
     let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
-    device.save(&mut conn).await?;
+    device.save(conn).await?;
 
 
     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
     info!("User {} logged in successfully via API key. IP: {}", user.email, ip.ip);
 
 
@@ -261,7 +338,8 @@ async fn _api_key_login(data: ConnectData, mut conn: DbConn, ip: &ClientIp) -> J
 /// Retrieves an existing device or creates a new device from ConnectData and the User
 /// Retrieves an existing device or creates a new device from ConnectData and the User
 async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) {
 async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) {
     // On iOS, device_type sends "iOS", on others it sends a number
     // On iOS, device_type sends "iOS", on others it sends a number
-    let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(0);
+    // When unknown or unable to parse, return 14, which is 'Unknown Browser'
+    let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14);
     let device_id = data.device_identifier.clone().expect("No device id provided");
     let device_id = data.device_identifier.clone().expect("No device id provided");
     let device_name = data.device_name.clone().expect("No device name provided");
     let device_name = data.device_name.clone().expect("No device name provided");
 
 
@@ -338,7 +416,12 @@ async fn twofactor_auth(
                 }
                 }
             }
             }
         }
         }
-        _ => err!("Invalid two factor provider"),
+        _ => err!(
+            "Invalid two factor provider",
+            ErrorEvent {
+                event: EventType::UserFailedLogIn2fa
+            }
+        ),
     }
     }
 
 
     TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;
     TwoFactorIncomplete::mark_complete(user_uuid, &device.uuid, conn).await?;

+ 1 - 0
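Both the login wrapper and get_device above fall back to device type 14 ("Unknown Browser") when the client-supplied value is missing or cannot be parsed. A tiny standalone sketch of that fallback (hypothetical helper name):

// Sketch only, not part of this patch: device-type fallback to 14 ("Unknown Browser").
fn parse_device_type(raw: Option<&str>) -> i32 {
    raw.and_then(|s| s.trim().parse::<i32>().ok()).unwrap_or(14)
}

// parse_device_type(Some("iOS")) == 14, parse_device_type(Some("8")) == 8, parse_device_type(None) == 14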
src/api/mod.rs

@@ -16,6 +16,7 @@ pub use crate::api::{
     core::routes as core_routes,
     core::routes as core_routes,
     core::two_factor::send_incomplete_2fa_notifications,
     core::two_factor::send_incomplete_2fa_notifications,
     core::{emergency_notification_reminder_job, emergency_request_timeout_job},
     core::{emergency_notification_reminder_job, emergency_request_timeout_job},
+    core::{event_cleanup_job, events_routes as core_events_routes},
     icons::routes as icons_routes,
     icons::routes as icons_routes,
     identity::routes as identity_routes,
     identity::routes as identity_routes,
     notifications::routes as notifications_routes,
     notifications::routes as notifications_routes,

+ 18 - 1
src/config.rs

@@ -371,6 +371,9 @@ make_config! {
         /// Emergency request timeout schedule |> Cron schedule of the job that grants emergency access requests that have met the required wait time.
         /// Emergency request timeout schedule |> Cron schedule of the job that grants emergency access requests that have met the required wait time.
         /// Defaults to hourly. Set blank to disable this job.
         /// Defaults to hourly. Set blank to disable this job.
         emergency_request_timeout_schedule:   String, false,  def,    "0 5 * * * *".to_string();
         emergency_request_timeout_schedule:   String, false,  def,    "0 5 * * * *".to_string();
+        /// Event cleanup schedule |> Cron schedule of the job that cleans old events from the event table.
+        /// Defaults to daily. Set blank to disable this job.
+        event_cleanup_schedule:   String, false,  def,    "0 10 0 * * *".to_string();
     },
     },
 
 
     /// General settings
     /// General settings
@@ -426,6 +429,8 @@ make_config! {
         signups_verify_resend_limit: u32, true, def,    6;
         signups_verify_resend_limit: u32, true, def,    6;
         /// Email domain whitelist |> Allow signups only from this list of comma-separated domains, even when signups are otherwise disabled
         /// Email domain whitelist |> Allow signups only from this list of comma-separated domains, even when signups are otherwise disabled
         signups_domains_whitelist: String, true, def,   String::new();
         signups_domains_whitelist: String, true, def,   String::new();
+        /// Enable event logging |> Enables event logging for organizations.
+        org_events_enabled:     bool,   false,  def,    false;
         /// Org creation users |> Allow org creation only by this list of comma-separated user emails.
         /// Org creation users |> Allow org creation only by this list of comma-separated user emails.
         /// Blank or 'all' means all users can create orgs; 'none' means no users can create orgs.
         /// Blank or 'all' means all users can create orgs; 'none' means no users can create orgs.
         org_creation_users:     String, true,   def,    String::new();
         org_creation_users:     String, true,   def,    String::new();
@@ -451,6 +456,9 @@ make_config! {
 
 
         /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
         /// Invitation organization name |> Name shown in the invitation emails that don't come from a specific organization
         invitation_org_name:    String, true,   def,    "Vaultwarden".to_string();
         invitation_org_name:    String, true,   def,    "Vaultwarden".to_string();
+
+        /// Events days retain |> Number of days to retain events stored in the database. If unset, events are kept indefinitely.
+        events_days_retain:     i64,    false,   option;
     },
     },
 
 
     /// Advanced settings
     /// Advanced settings
@@ -738,26 +746,35 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
         err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour")
         err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour")
     }
     }
 
 
+    // Validate schedule crontab format
     if !cfg.send_purge_schedule.is_empty() && cfg.send_purge_schedule.parse::<Schedule>().is_err() {
     if !cfg.send_purge_schedule.is_empty() && cfg.send_purge_schedule.parse::<Schedule>().is_err() {
         err!("`SEND_PURGE_SCHEDULE` is not a valid cron expression")
         err!("`SEND_PURGE_SCHEDULE` is not a valid cron expression")
     }
     }
+
     if !cfg.trash_purge_schedule.is_empty() && cfg.trash_purge_schedule.parse::<Schedule>().is_err() {
     if !cfg.trash_purge_schedule.is_empty() && cfg.trash_purge_schedule.parse::<Schedule>().is_err() {
         err!("`TRASH_PURGE_SCHEDULE` is not a valid cron expression")
         err!("`TRASH_PURGE_SCHEDULE` is not a valid cron expression")
     }
     }
+
     if !cfg.incomplete_2fa_schedule.is_empty() && cfg.incomplete_2fa_schedule.parse::<Schedule>().is_err() {
     if !cfg.incomplete_2fa_schedule.is_empty() && cfg.incomplete_2fa_schedule.parse::<Schedule>().is_err() {
         err!("`INCOMPLETE_2FA_SCHEDULE` is not a valid cron expression")
         err!("`INCOMPLETE_2FA_SCHEDULE` is not a valid cron expression")
     }
     }
+
     if !cfg.emergency_notification_reminder_schedule.is_empty()
     if !cfg.emergency_notification_reminder_schedule.is_empty()
         && cfg.emergency_notification_reminder_schedule.parse::<Schedule>().is_err()
         && cfg.emergency_notification_reminder_schedule.parse::<Schedule>().is_err()
     {
     {
         err!("`EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE` is not a valid cron expression")
         err!("`EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE` is not a valid cron expression")
     }
     }
+
     if !cfg.emergency_request_timeout_schedule.is_empty()
     if !cfg.emergency_request_timeout_schedule.is_empty()
         && cfg.emergency_request_timeout_schedule.parse::<Schedule>().is_err()
         && cfg.emergency_request_timeout_schedule.parse::<Schedule>().is_err()
     {
     {
         err!("`EMERGENCY_REQUEST_TIMEOUT_SCHEDULE` is not a valid cron expression")
         err!("`EMERGENCY_REQUEST_TIMEOUT_SCHEDULE` is not a valid cron expression")
     }
     }
 
 
+    if !cfg.event_cleanup_schedule.is_empty() && cfg.event_cleanup_schedule.parse::<Schedule>().is_err() {
+        err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression")
+    }
+
     Ok(())
     Ok(())
 }
 }
 
 
@@ -1117,7 +1134,7 @@ fn case_helper<'reg, 'rc>(
     let value = param.value().clone();
     let value = param.value().clone();
 
 
     if h.params().iter().skip(1).any(|x| x.value() == &value) {
     if h.params().iter().skip(1).any(|x| x.value() == &value) {
-        h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or(Ok(()))
+        h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or_else(|| Ok(()))
     } else {
     } else {
         Ok(())
         Ok(())
     }
     }

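EVENT_CLEANUP_SCHEDULE is validated the same way as the other job schedules, by parsing it as a cron Schedule. A self-contained sketch of that parse, assuming the cron and chrono crates the project already depends on; the expression is the default from the config above (daily at 00:10:00 UTC):

// Sketch only, not part of this patch: parse and preview the default
// event-cleanup schedule.
use cron::Schedule;
use std::str::FromStr;

fn main() {
    let expr = "0 10 0 * * *";
    match Schedule::from_str(expr) {
        Ok(schedule) => {
            // Print the next three planned cleanup runs in UTC.
            for next in schedule.upcoming(chrono::Utc).take(3) {
                println!("next event cleanup: {next}");
            }
        }
        Err(e) => eprintln!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression: {e}"),
    }
}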
+ 318 - 0
src/db/models/event.rs

@@ -0,0 +1,318 @@
+use crate::db::DbConn;
+use serde_json::Value;
+
+use crate::{api::EmptyResult, error::MapResult, CONFIG};
+
+use chrono::{Duration, NaiveDateTime, Utc};
+
+// https://bitwarden.com/help/event-logs/
+
+db_object! {
+    // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+    // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Api/Models/Public/Response/EventResponseModel.cs
+    // Upstream SQL: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Sql/dbo/Tables/Event.sql
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
+    #[diesel(table_name = event)]
+    #[diesel(primary_key(uuid))]
+    pub struct Event {
+        pub uuid: String,
+        pub event_type: i32, // EventType
+        pub user_uuid: Option<String>,
+        pub org_uuid: Option<String>,
+        pub cipher_uuid: Option<String>,
+        pub collection_uuid: Option<String>,
+        pub group_uuid: Option<String>,
+        pub org_user_uuid: Option<String>,
+        pub act_user_uuid: Option<String>,
+        // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
+        pub device_type: Option<i32>,
+        pub ip_address: Option<String>,
+        pub event_date: NaiveDateTime,
+        pub policy_uuid: Option<String>,
+        pub provider_uuid: Option<String>,
+        pub provider_user_uuid: Option<String>,
+        pub provider_org_uuid: Option<String>,
+    }
+}
+
+// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/EventType.cs
+#[derive(Debug, Copy, Clone)]
+pub enum EventType {
+    // User
+    UserLoggedIn = 1000,
+    UserChangedPassword = 1001,
+    UserUpdated2fa = 1002,
+    UserDisabled2fa = 1003,
+    UserRecovered2fa = 1004,
+    UserFailedLogIn = 1005,
+    UserFailedLogIn2fa = 1006,
+    UserClientExportedVault = 1007,
+    // UserUpdatedTempPassword = 1008, // Not supported
+    // UserMigratedKeyToKeyConnector = 1009, // Not supported
+
+    // Cipher
+    CipherCreated = 1100,
+    CipherUpdated = 1101,
+    CipherDeleted = 1102,
+    CipherAttachmentCreated = 1103,
+    CipherAttachmentDeleted = 1104,
+    CipherShared = 1105,
+    CipherUpdatedCollections = 1106,
+    CipherClientViewed = 1107,
+    CipherClientToggledPasswordVisible = 1108,
+    CipherClientToggledHiddenFieldVisible = 1109,
+    CipherClientToggledCardCodeVisible = 1110,
+    CipherClientCopiedPassword = 1111,
+    CipherClientCopiedHiddenField = 1112,
+    CipherClientCopiedCardCode = 1113,
+    CipherClientAutofilled = 1114,
+    CipherSoftDeleted = 1115,
+    CipherRestored = 1116,
+    CipherClientToggledCardNumberVisible = 1117,
+
+    // Collection
+    CollectionCreated = 1300,
+    CollectionUpdated = 1301,
+    CollectionDeleted = 1302,
+
+    // Group
+    GroupCreated = 1400,
+    GroupUpdated = 1401,
+    GroupDeleted = 1402,
+
+    // OrganizationUser
+    OrganizationUserInvited = 1500,
+    OrganizationUserConfirmed = 1501,
+    OrganizationUserUpdated = 1502,
+    OrganizationUserRemoved = 1503,
+    OrganizationUserUpdatedGroups = 1504,
+    // OrganizationUserUnlinkedSso = 1505, // Not supported
+    // OrganizationUserResetPasswordEnroll = 1506, // Not supported
+    // OrganizationUserResetPasswordWithdraw = 1507, // Not supported
+    // OrganizationUserAdminResetPassword = 1508, // Not supported
+    // OrganizationUserResetSsoLink = 1509, // Not supported
+    // OrganizationUserFirstSsoLogin = 1510, // Not supported
+    OrganizationUserRevoked = 1511,
+    OrganizationUserRestored = 1512,
+
+    // Organization
+    OrganizationUpdated = 1600,
+    OrganizationPurgedVault = 1601,
+    OrganizationClientExportedVault = 1602,
+    // OrganizationVaultAccessed = 1603,
+    // OrganizationEnabledSso = 1604, // Not supported
+    // OrganizationDisabledSso = 1605, // Not supported
+    // OrganizationEnabledKeyConnector = 1606, // Not supported
+    // OrganizationDisabledKeyConnector = 1607, // Not supported
+    // OrganizationSponsorshipsSynced = 1608, // Not supported
+
+    // Policy
+    PolicyUpdated = 1700,
+    // Provider (Not yet supported)
+    // ProviderUserInvited = 1800, // Not supported
+    // ProviderUserConfirmed = 1801, // Not supported
+    // ProviderUserUpdated = 1802, // Not supported
+    // ProviderUserRemoved = 1803, // Not supported
+    // ProviderOrganizationCreated = 1900, // Not supported
+    // ProviderOrganizationAdded = 1901, // Not supported
+    // ProviderOrganizationRemoved = 1902, // Not supported
+    // ProviderOrganizationVaultAccessed = 1903, // Not supported
+}
+
+/// Local methods
+impl Event {
+    pub fn new(event_type: i32, event_date: Option<NaiveDateTime>) -> Self {
+        let event_date = match event_date {
+            Some(d) => d,
+            None => Utc::now().naive_utc(),
+        };
+
+        Self {
+            uuid: crate::util::get_uuid(),
+            event_type,
+            user_uuid: None,
+            org_uuid: None,
+            cipher_uuid: None,
+            collection_uuid: None,
+            group_uuid: None,
+            org_user_uuid: None,
+            act_user_uuid: None,
+            device_type: None,
+            ip_address: None,
+            event_date,
+            policy_uuid: None,
+            provider_uuid: None,
+            provider_user_uuid: None,
+            provider_org_uuid: None,
+        }
+    }
+
+    pub fn to_json(&self) -> Value {
+        use crate::util::format_date;
+
+        json!({
+            "type": self.event_type,
+            "userId": self.user_uuid,
+            "organizationId": self.org_uuid,
+            "cipherId": self.cipher_uuid,
+            "collectionId": self.collection_uuid,
+            "groupId": self.group_uuid,
+            "organizationUserId": self.org_user_uuid,
+            "actingUserId": self.act_user_uuid,
+            "date": format_date(&self.event_date),
+            "deviceType": self.device_type,
+            "ipAddress": self.ip_address,
+            "policyId": self.policy_uuid,
+            "providerId": self.provider_uuid,
+            "providerUserId": self.provider_user_uuid,
+            "providerOrganizationId": self.provider_org_uuid,
+            // "installationId": null, // Not supported
+        })
+    }
+}
+
+/// Database methods
+/// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+impl Event {
+    pub const PAGE_SIZE: i64 = 30;
+
+    /// #############
+    /// Basic Queries
+    pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+        db_run! { conn:
+            sqlite, mysql {
+                diesel::replace_into(event::table)
+                .values(EventDb::to_db(self))
+                .execute(conn)
+                .map_res("Error saving event")
+            }
+            postgresql {
+                diesel::insert_into(event::table)
+                .values(EventDb::to_db(self))
+                .on_conflict(event::uuid)
+                .do_update()
+                .set(EventDb::to_db(self))
+                .execute(conn)
+                .map_res("Error saving event")
+            }
+        }
+    }
+
+    pub async fn save_user_event(events: Vec<Event>, conn: &mut DbConn) -> EmptyResult {
+        // Special save function which is able to handle multiple events.
+        // SQLite doesn't support the DEFAULT argument, and does not support inserting multiple values at the same time.
+        // MySQL and PostgreSQL do.
+        // We also ignore duplicates if they ever exist, else they could break the whole flow.
+        db_run! { conn:
+            // Unfortunately SQLite does not support inserting multiple records at the same time
+            // We loop through the events here and insert them one at a time.
+            sqlite {
+                for event in events {
+                    diesel::insert_or_ignore_into(event::table)
+                    .values(EventDb::to_db(&event))
+                    .execute(conn)
+                    .unwrap_or_default();
+                }
+                Ok(())
+            }
+            mysql {
+                let events: Vec<EventDb> = events.iter().map(EventDb::to_db).collect();
+                diesel::insert_or_ignore_into(event::table)
+                .values(&events)
+                .execute(conn)
+                .unwrap_or_default();
+                Ok(())
+            }
+            postgresql {
+                let events: Vec<EventDb> = events.iter().map(EventDb::to_db).collect();
+                diesel::insert_into(event::table)
+                .values(&events)
+                .on_conflict_do_nothing()
+                .execute(conn)
+                .unwrap_or_default();
+                Ok(())
+            }
+        }
+    }
+
+    pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+        db_run! { conn: {
+            diesel::delete(event::table.filter(event::uuid.eq(self.uuid)))
+                .execute(conn)
+                .map_res("Error deleting event")
+        }}
+    }
+
+    /// ##############
+    /// Custom Queries
+    pub async fn find_by_organization_uuid(
+        org_uuid: &str,
+        start: &NaiveDateTime,
+        end: &NaiveDateTime,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            event::table
+                .filter(event::org_uuid.eq(org_uuid))
+                .filter(event::event_date.between(start, end))
+                .order_by(event::event_date.desc())
+                .limit(Self::PAGE_SIZE)
+                .load::<EventDb>(conn)
+                .expect("Error filtering events")
+                .from_db()
+        }}
+    }
+
+    pub async fn find_by_org_and_user_org(
+        org_uuid: &str,
+        user_org_uuid: &str,
+        start: &NaiveDateTime,
+        end: &NaiveDateTime,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            event::table
+                .inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid)))
+                .filter(event::org_uuid.eq(org_uuid))
+                .filter(event::event_date.between(start, end))
+                .filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
+                .select(event::all_columns)
+                .order_by(event::event_date.desc())
+                .limit(Self::PAGE_SIZE)
+                .load::<EventDb>(conn)
+                .expect("Error filtering events")
+                .from_db()
+        }}
+    }
+
+    pub async fn find_by_cipher_uuid(
+        cipher_uuid: &str,
+        start: &NaiveDateTime,
+        end: &NaiveDateTime,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        db_run! { conn: {
+            event::table
+                .filter(event::cipher_uuid.eq(cipher_uuid))
+                .filter(event::event_date.between(start, end))
+                .order_by(event::event_date.desc())
+                .limit(Self::PAGE_SIZE)
+                .load::<EventDb>(conn)
+                .expect("Error filtering events")
+                .from_db()
+        }}
+    }
+
+    pub async fn clean_events(conn: &mut DbConn) -> EmptyResult {
+        if let Some(days_to_retain) = CONFIG.events_days_retain() {
+            let dt = Utc::now().naive_utc() - Duration::days(days_to_retain);
+            db_run! { conn: {
+                diesel::delete(event::table.filter(event::event_date.lt(dt)))
+                .execute(conn)
+                .map_res("Error cleaning old events")
+            }}
+        } else {
+            Ok(())
+        }
+    }
+}
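
The query helpers above all take a date range and cap results at `Self::PAGE_SIZE`. A minimal usage sketch (not part of this diff; it only assumes the `Event` model and `DbConn` shown here plus chrono, and the helper name is hypothetical):

use chrono::{Duration, Utc};

// Hypothetical caller: fetch the newest page of events for an organization
// covering the last 30 days, using find_by_organization_uuid() from above.
async fn recent_org_events(org_uuid: &str, conn: &mut DbConn) -> Vec<Event> {
    let end = Utc::now().naive_utc();
    let start = end - Duration::days(30);
    Event::find_by_organization_uuid(org_uuid, &start, &end, conn).await
}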

+ 2 - 0
src/db/models/mod.rs

@@ -3,6 +3,7 @@ mod cipher;
 mod collection;
 mod device;
 mod emergency_access;
+mod event;
 mod favorite;
 mod folder;
 mod group;
@@ -18,6 +19,7 @@ pub use self::cipher::Cipher;
 pub use self::collection::{Collection, CollectionCipher, CollectionUser};
 pub use self::device::Device;
 pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
+pub use self::event::{Event, EventType};
 pub use self::favorite::Favorite;
 pub use self::folder::{Folder, FolderCipher};
 pub use self::group::{CollectionGroup, Group, GroupUser};

+ 25 - 3
src/db/models/organization.rs

@@ -3,6 +3,7 @@ use serde_json::Value;
 use std::cmp::Ordering;
 
 use super::{CollectionUser, GroupUser, OrgPolicy, OrgPolicyType, User};
+use crate::CONFIG;
 
 db_object! {
     #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -147,7 +148,7 @@ impl Organization {
             "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
             "Use2fa": true,
             "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": false, // Not supported
+            "UseEvents": CONFIG.org_events_enabled(),
             "UseGroups": true,
             "UseTotp": true,
             "UsePolicies": true,
@@ -300,10 +301,9 @@ impl UserOrganization {
             "Seats": 10, // The value doesn't matter, we don't check server-side
             "MaxCollections": 10, // The value doesn't matter, we don't check server-side
             "UsersGetPremium": true,
-
             "Use2fa": true,
             "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet)
-            "UseEvents": false, // Not supported
+            "UseEvents": CONFIG.org_events_enabled(),
             "UseGroups": true,
             "UseTotp": true,
             // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
@@ -629,6 +629,16 @@ impl UserOrganization {
         }}
     }
 
+    pub async fn get_org_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
+        db_run! { conn: {
+            users_organizations::table
+                .filter(users_organizations::user_uuid.eq(user_uuid))
+                .select(users_organizations::org_uuid)
+                .load::<String>(conn)
+                .unwrap_or_default()
+        }}
+    }
+
     pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             users_organizations::table
@@ -670,6 +680,18 @@ impl UserOrganization {
         }}
     }
 
+    pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool {
+        db_run! { conn: {
+            users_organizations::table
+            .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()))))
+            .filter(users_organizations::user_uuid.eq(user_uuid))
+            .filter(users_organizations::atype.eq_any(vec![UserOrgType::Owner as i32, UserOrgType::Admin as i32]))
+            .count()
+            .first::<i64>(conn)
+            .ok().unwrap_or(0) != 0
+        }}
+    }
+
     pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
         db_run! { conn: {
             users_organizations::table

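A sketch of how the two helpers added to UserOrganization might be combined with the Event queries from this PR (illustrative only; the function below is hypothetical and the calling context around it is assumed):

use chrono::NaiveDateTime;

// Hypothetical helper: gather events across every organization the user
// belongs to, via get_org_uuid_by_user() and Event::find_by_organization_uuid().
async fn events_for_user_orgs(
    user_uuid: &str,
    start: &NaiveDateTime,
    end: &NaiveDateTime,
    conn: &mut DbConn,
) -> Vec<Event> {
    let mut events = Vec::new();
    for org_uuid in UserOrganization::get_org_uuid_by_user(user_uuid, conn).await {
        events.extend(Event::find_by_organization_uuid(&org_uuid, start, end, conn).await);
    }
    events
}
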
+ 23 - 0
src/db/schemas/mysql/schema.rs

@@ -55,6 +55,27 @@ table! {
     }
 }
 
+table! {
+    event (uuid) {
+        uuid -> Varchar,
+        event_type -> Integer,
+        user_uuid -> Nullable<Varchar>,
+        org_uuid -> Nullable<Varchar>,
+        cipher_uuid -> Nullable<Varchar>,
+        collection_uuid -> Nullable<Varchar>,
+        group_uuid -> Nullable<Varchar>,
+        org_user_uuid -> Nullable<Varchar>,
+        act_user_uuid -> Nullable<Varchar>,
+        device_type -> Nullable<Integer>,
+        ip_address -> Nullable<Text>,
+        event_date -> Timestamp,
+        policy_uuid -> Nullable<Varchar>,
+        provider_uuid -> Nullable<Varchar>,
+        provider_user_uuid -> Nullable<Varchar>,
+        provider_org_uuid -> Nullable<Varchar>,
+    }
+}
+
 table! {
     favorites (user_uuid, cipher_uuid) {
         user_uuid -> Text,
@@ -272,6 +293,7 @@ joinable!(groups_users -> users_organizations (users_organizations_uuid));
 joinable!(groups_users -> groups (groups_uuid));
 joinable!(collections_groups -> collections (collections_uuid));
 joinable!(collections_groups -> groups (groups_uuid));
+joinable!(event -> users_organizations (uuid));
 
 allow_tables_to_appear_in_same_query!(
     attachments,
@@ -293,4 +315,5 @@ allow_tables_to_appear_in_same_query!(
     groups,
     groups_users,
     collections_groups,
+    event,
 );

+ 23 - 0
src/db/schemas/postgresql/schema.rs

@@ -55,6 +55,27 @@ table! {
     }
 }
 
+table! {
+    event (uuid) {
+        uuid -> Text,
+        event_type -> Integer,
+        user_uuid -> Nullable<Text>,
+        org_uuid -> Nullable<Text>,
+        cipher_uuid -> Nullable<Text>,
+        collection_uuid -> Nullable<Text>,
+        group_uuid -> Nullable<Text>,
+        org_user_uuid -> Nullable<Text>,
+        act_user_uuid -> Nullable<Text>,
+        device_type -> Nullable<Integer>,
+        ip_address -> Nullable<Text>,
+        event_date -> Timestamp,
+        policy_uuid -> Nullable<Text>,
+        provider_uuid -> Nullable<Text>,
+        provider_user_uuid -> Nullable<Text>,
+        provider_org_uuid -> Nullable<Text>,
+    }
+}
+
 table! {
     favorites (user_uuid, cipher_uuid) {
         user_uuid -> Text,
@@ -272,6 +293,7 @@ joinable!(groups_users -> users_organizations (users_organizations_uuid));
 joinable!(groups_users -> groups (groups_uuid));
 joinable!(collections_groups -> collections (collections_uuid));
 joinable!(collections_groups -> groups (groups_uuid));
+joinable!(event -> users_organizations (uuid));
 
 allow_tables_to_appear_in_same_query!(
     attachments,
@@ -293,4 +315,5 @@ allow_tables_to_appear_in_same_query!(
     groups,
     groups_users,
     collections_groups,
+    event,
 );

+ 24 - 0
src/db/schemas/sqlite/schema.rs

@@ -55,6 +55,27 @@ table! {
     }
 }
 
+table! {
+    event (uuid) {
+        uuid -> Text,
+        event_type -> Integer,
+        user_uuid -> Nullable<Text>,
+        org_uuid -> Nullable<Text>,
+        cipher_uuid -> Nullable<Text>,
+        collection_uuid -> Nullable<Text>,
+        group_uuid -> Nullable<Text>,
+        org_user_uuid -> Nullable<Text>,
+        act_user_uuid -> Nullable<Text>,
+        device_type -> Nullable<Integer>,
+        ip_address -> Nullable<Text>,
+        event_date -> Timestamp,
+        policy_uuid -> Nullable<Text>,
+        provider_uuid -> Nullable<Text>,
+        provider_user_uuid -> Nullable<Text>,
+        provider_org_uuid -> Nullable<Text>,
+    }
+}
+
 table! {
     favorites (user_uuid, cipher_uuid) {
         user_uuid -> Text,
@@ -266,12 +287,14 @@ joinable!(users_collections -> collections (collection_uuid));
 joinable!(users_collections -> users (user_uuid));
 joinable!(users_organizations -> organizations (org_uuid));
 joinable!(users_organizations -> users (user_uuid));
+joinable!(users_organizations -> ciphers (org_uuid));
 joinable!(emergency_access -> users (grantor_uuid));
 joinable!(groups -> organizations (organizations_uuid));
 joinable!(groups_users -> users_organizations (users_organizations_uuid));
 joinable!(groups_users -> groups (groups_uuid));
 joinable!(collections_groups -> collections (collections_uuid));
 joinable!(collections_groups -> groups (groups_uuid));
+joinable!(event -> users_organizations (uuid));
 
 allow_tables_to_appear_in_same_query!(
     attachments,
@@ -293,4 +316,5 @@ allow_tables_to_appear_in_same_query!(
     groups,
     groups_users,
     collections_groups,
+    event,
 );

+ 30 - 4
src/error.rs

@@ -1,6 +1,7 @@
 //
 // Error generator macro
 //
+use crate::db::models::EventType;
 use std::error::Error as StdError;
 
 macro_rules! make_error {
@@ -8,14 +9,17 @@ macro_rules! make_error {
         const BAD_REQUEST: u16 = 400;
 
         pub enum ErrorKind { $($name( $ty )),+ }
-        pub struct Error { message: String, error: ErrorKind, error_code: u16 }
+
+        #[derive(Debug)]
+        pub struct ErrorEvent { pub event: EventType }
+        pub struct Error { message: String, error: ErrorKind, error_code: u16, event: Option<ErrorEvent> }
 
         $(impl From<$ty> for Error {
             fn from(err: $ty) -> Self { Error::from((stringify!($name), err)) }
         })+
         $(impl<S: Into<String>> From<(S, $ty)> for Error {
             fn from(val: (S, $ty)) -> Self {
-                Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST }
+                Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST, event: None }
             }
         })+
         impl StdError for Error {
@@ -130,6 +134,16 @@ impl Error {
         self.error_code = code;
         self
     }
+
+    #[must_use]
+    pub fn with_event(mut self, event: ErrorEvent) -> Self {
+        self.event = Some(event);
+        self
+    }
+
+    pub fn get_event(&self) -> &Option<ErrorEvent> {
+        &self.event
+    }
 }
 
 pub trait MapResult<S> {
@@ -216,12 +230,21 @@ macro_rules! err {
         error!("{}", $msg);
         return Err($crate::error::Error::new($msg, $msg));
     }};
+    ($msg:expr, ErrorEvent $err_event:tt) => {{
+        error!("{}", $msg);
+        return Err($crate::error::Error::new($msg, $msg).with_event($crate::error::ErrorEvent $err_event));
+    }};
     ($usr_msg:expr, $log_value:expr) => {{
         error!("{}. {}", $usr_msg, $log_value);
         return Err($crate::error::Error::new($usr_msg, $log_value));
     }};
+    ($usr_msg:expr, $log_value:expr, ErrorEvent $err_event:tt) => {{
+        error!("{}. {}", $usr_msg, $log_value);
+        return Err($crate::error::Error::new($usr_msg, $log_value).with_event($crate::error::ErrorEvent $err_event));
+    }};
 }
 
+#[macro_export]
 macro_rules! err_silent {
     ($msg:expr) => {{
         return Err($crate::error::Error::new($msg, $msg));
@@ -233,11 +256,11 @@ macro_rules! err_silent {
 
 #[macro_export]
 macro_rules! err_code {
-    ($msg:expr, $err_code: expr) => {{
+    ($msg:expr, $err_code:expr) => {{
         error!("{}", $msg);
         return Err($crate::error::Error::new($msg, $msg).with_code($err_code));
     }};
-    ($usr_msg:expr, $log_value:expr, $err_code: expr) => {{
+    ($usr_msg:expr, $log_value:expr, $err_code:expr) => {{
         error!("{}. {}", $usr_msg, $log_value);
         return Err($crate::error::Error::new($usr_msg, $log_value).with_code($err_code));
     }};
@@ -260,6 +283,9 @@ macro_rules! err_json {
     ($expr:expr, $log_value:expr) => {{
         return Err(($log_value, $expr).into());
     }};
+    ($expr:expr, $log_value:expr, $err_event:expr, ErrorEvent) => {{
+        return Err(($log_value, $expr).into().with_event($err_event));
+    }};
 }
 
 #[macro_export]

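The extra err! arm lets a call site attach an EventType to the error so a failed action can still be logged. A hedged usage sketch (not part of this diff; the EventType variant name is assumed for illustration and may not match the enum defined earlier in this PR):

// Hypothetical guard in an org-scoped handler; EmptyResult is the crate's
// empty Result alias used throughout this diff.
fn require_org_access(has_access: bool) -> EmptyResult {
    if !has_access {
        // err! logs the message and returns early with the ErrorEvent attached.
        err!(
            "You don't have access to this organization",
            ErrorEvent { event: EventType::OrganizationUserRemoved } // variant assumed
        )
    }
    Ok(())
}
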
+ 11 - 0
src/main.rs

@@ -430,6 +430,7 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error>
         .mount([basepath, "/"].concat(), api::web_routes())
         .mount([basepath, "/api"].concat(), api::core_routes())
         .mount([basepath, "/admin"].concat(), api::admin_routes())
+        .mount([basepath, "/events"].concat(), api::core_events_routes())
         .mount([basepath, "/identity"].concat(), api::identity_routes())
         .mount([basepath, "/icons"].concat(), api::icons_routes())
         .mount([basepath, "/notifications"].concat(), api::notifications_routes())
@@ -511,6 +512,16 @@ async fn schedule_jobs(pool: db::DbPool) {
                 }));
             }
 
+            // Clean up the event table, removing records older than the configured number of days (EVENTS_DAYS_RETAIN).
+            if CONFIG.org_events_enabled()
+                && !CONFIG.event_cleanup_schedule().is_empty()
+                && CONFIG.events_days_retain().is_some()
+            {
+                sched.add(Job::new(CONFIG.event_cleanup_schedule().parse().unwrap(), || {
+                    runtime.spawn(api::event_cleanup_job(pool.clone()));
+                }));
+            }
+
             // Periodically check for jobs to run. We probably won't need any
             // jobs that run more often than once a minute, so a default poll
             // interval of 30 seconds should be sufficient. Users who want to

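The body of api::event_cleanup_job is not shown in this excerpt; given Event::clean_events() above, a plausible sketch looks like the following. This is not the actual implementation from the PR, and the connection-pool method name is an assumption:

// Plausible sketch only -- not confirmed by this diff.
pub async fn event_cleanup_job(pool: db::DbPool) {
    debug!("Start events cleanup job");
    match pool.get().await {
        // pool.get() returning a Result<DbConn, _> is assumed here.
        Ok(mut conn) => {
            if let Err(e) = Event::clean_events(&mut conn).await {
                error!("Error while cleaning up old events: {:?}", e);
            }
        }
        Err(e) => error!("Failed to get DB connection for events cleanup: {:?}", e),
    }
}
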
+ 8 - 1
src/util.rs

@@ -456,10 +456,13 @@ pub fn get_env_bool(key: &str) -> Option<bool> {
 
 use chrono::{DateTime, Local, NaiveDateTime, TimeZone};
 
+// Format used by Bitwarden API
+const DATETIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6fZ";
+
 /// Formats a UTC-offset `NaiveDateTime` in the format used by Bitwarden API
 /// responses with "date" fields (`CreationDate`, `RevisionDate`, etc.).
 pub fn format_date(dt: &NaiveDateTime) -> String {
-    dt.format("%Y-%m-%dT%H:%M:%S%.6fZ").to_string()
+    dt.format(DATETIME_FORMAT).to_string()
 }
 
 /// Formats a `DateTime<Local>` using the specified format string.
@@ -500,6 +503,10 @@ pub fn format_datetime_http(dt: &DateTime<Local>) -> String {
     expiry_time.to_rfc2822().replace("+0000", "GMT")
 }
 
+pub fn parse_date(date: &str) -> NaiveDateTime {
+    NaiveDateTime::parse_from_str(date, DATETIME_FORMAT).unwrap()
+}
+
 //
 // Deployment environment methods
 //
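
Since parse_date() uses the same DATETIME_FORMAT constant as format_date() and unwraps the parse result, the two are intended to round-trip, and malformed input will panic. A small illustrative test (not part of this diff):

#[test]
fn bitwarden_date_format_round_trips() {
    let now = chrono::Utc::now().naive_utc();
    let formatted = format_date(&now);   // e.g. "2022-11-27T12:34:56.789012Z"
    let parsed = parse_date(&formatted); // would panic on malformed input
    assert_eq!(format_date(&parsed), formatted);
}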