From 44dde159f6129508c2e3c301c3ddd7085f23db6f Mon Sep 17 00:00:00 2001 From: James Date: Sat, 28 Feb 2026 05:38:02 -0500 Subject: [PATCH] Add ops: systemd service, deploy scripts, backup, healthcheck, README --- Makefile | 64 +-- README.md | 210 ++++++++ api/handlers.go | 548 ++++++++++++++++++++ api/middleware.go | 25 + api/middleware_test.go | 416 +++++++++++++++ deploy/backup.sh | 77 +++ deploy/dealspace.service | 24 + deploy/env.template | 63 +++ deploy/healthcheck.sh | 14 + deploy/install.sh | 113 ++++ docs/soc2/data-retention-policy.md | 189 +++++++ docs/soc2/disaster-recovery-plan.md | 300 +++++++++++ docs/soc2/incident-response-plan.md | 288 +++++++++++ docs/soc2/risk-assessment.md | 204 ++++++++ docs/soc2/security-policy.md | 288 +++++++++++ docs/soc2/soc2-self-assessment-2026.md | 479 +++++++++++++++++ go.mod | 17 +- go.sum | 26 +- lib/crypto_test.go | 251 +++++++++ lib/dbcore.go | 143 ++++++ lib/dbcore_test.go | 627 +++++++++++++++++++++++ lib/mailer.go | 282 ++++++++++ lib/rbac_test.go | 387 ++++++++++++++ lib/watermark.go | 582 ++++++++++++++++++++- portal/emails/answer_approved.html | 85 ++++ portal/emails/answer_rejected.html | 76 +++ portal/emails/answer_submitted.html | 92 ++++ portal/emails/invite.html | 76 +++ portal/emails/request_forwarded.html | 85 ++++ portal/emails/tasks_assigned.html | 87 ++++ portal/templates/app/tasks.html | 169 ++++++ portal/templates/auth/login.html | 94 ++++ portal/templates/auth/setup.html | 100 ++++ website/dpa.html | 1 + website/features.html | 1 + website/index.html | 1 + website/pricing.html | 1 + website/privacy.html | 1 + website/security.html | 9 +- website/soc2.html | 679 +++++++++++++++++++++++++ website/terms.html | 1 + 41 files changed, 7108 insertions(+), 67 deletions(-) create mode 100644 README.md create mode 100644 api/middleware_test.go create mode 100755 deploy/backup.sh create mode 100644 deploy/dealspace.service create mode 100644 deploy/env.template create mode 100755 deploy/healthcheck.sh create 
mode 100755 deploy/install.sh create mode 100644 docs/soc2/data-retention-policy.md create mode 100644 docs/soc2/disaster-recovery-plan.md create mode 100644 docs/soc2/incident-response-plan.md create mode 100644 docs/soc2/risk-assessment.md create mode 100644 docs/soc2/security-policy.md create mode 100644 docs/soc2/soc2-self-assessment-2026.md create mode 100644 lib/crypto_test.go create mode 100644 lib/dbcore_test.go create mode 100644 lib/mailer.go create mode 100644 lib/rbac_test.go create mode 100644 portal/emails/answer_approved.html create mode 100644 portal/emails/answer_rejected.html create mode 100644 portal/emails/answer_submitted.html create mode 100644 portal/emails/invite.html create mode 100644 portal/emails/request_forwarded.html create mode 100644 portal/emails/tasks_assigned.html create mode 100644 portal/templates/app/tasks.html create mode 100644 portal/templates/auth/login.html create mode 100644 portal/templates/auth/setup.html create mode 100644 website/soc2.html diff --git a/Makefile b/Makefile index 5870d55..ba8b6b8 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,10 @@ BINARY := dealspace BUILD_DIR := build -REMOTE := root@82.24.174.112 -REMOTE_PATH := /opt/dealspace/bin/dealspace +SHANNON := root@82.24.174.112 +DEPLOY_PATH := /opt/dealspace REMOTE_MIG := /opt/dealspace/migrations -.PHONY: build run test clean deploy install-service +.PHONY: build build-linux run test clean deploy install-service logs ssh health build: @mkdir -p $(BUILD_DIR) @@ -15,43 +15,43 @@ build: @rm -rf cmd/server/website @echo "Built $(BUILD_DIR)/$(BINARY)" +build-linux: + @mkdir -p $(BUILD_DIR) + @rm -rf cmd/server/website + @cp -r website cmd/server/website + CGO_ENABLED=1 GOOS=linux GOARCH=amd64 PATH=$(PATH):/usr/local/go/bin go build -tags fts5 -o $(BUILD_DIR)/$(BINARY)-linux ./cmd/server + @rm -rf cmd/server/website + @echo "Built $(BUILD_DIR)/$(BINARY)-linux" + run: build $(BUILD_DIR)/$(BINARY) test: - go test ./... 
+ CGO_ENABLED=1 PATH=$(PATH):/usr/local/go/bin go test -tags fts5 ./... -v clean: rm -rf $(BUILD_DIR) rm -rf cmd/server/website -deploy: clean - @mkdir -p $(BUILD_DIR) - @rm -rf cmd/server/website - @cp -r website cmd/server/website - CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o $(BUILD_DIR)/$(BINARY) ./cmd/server - @rm -rf cmd/server/website - scp $(BUILD_DIR)/$(BINARY) $(REMOTE):$(REMOTE_PATH) - scp -r migrations $(REMOTE):$(REMOTE_MIG) - ssh $(REMOTE) 'systemctl restart dealspace' - @echo "Deployed to $(REMOTE)" +deploy: clean build-linux + ssh $(SHANNON) "systemctl stop dealspace || true" + scp $(BUILD_DIR)/$(BINARY)-linux $(SHANNON):$(DEPLOY_PATH)/bin/dealspace + scp -r migrations $(SHANNON):$(REMOTE_MIG) + ssh $(SHANNON) "chmod +x $(DEPLOY_PATH)/bin/dealspace && systemctl start dealspace && sleep 2 && curl -s http://localhost:8080/health" + @echo "Deployed ✓" install-service: - @echo '[Unit]' > /tmp/dealspace.service - @echo 'Description=Dealspace' >> /tmp/dealspace.service - @echo 'After=network.target' >> /tmp/dealspace.service - @echo '' >> /tmp/dealspace.service - @echo '[Service]' >> /tmp/dealspace.service - @echo 'Type=simple' >> /tmp/dealspace.service - @echo 'User=root' >> /tmp/dealspace.service - @echo 'WorkingDirectory=/opt/dealspace' >> /tmp/dealspace.service - @echo 'EnvironmentFile=/opt/dealspace/.env' >> /tmp/dealspace.service - @echo 'ExecStart=/opt/dealspace/bin/dealspace' >> /tmp/dealspace.service - @echo 'Restart=always' >> /tmp/dealspace.service - @echo 'RestartSec=5' >> /tmp/dealspace.service - @echo '' >> /tmp/dealspace.service - @echo '[Install]' >> /tmp/dealspace.service - @echo 'WantedBy=multi-user.target' >> /tmp/dealspace.service - scp /tmp/dealspace.service $(REMOTE):/etc/systemd/system/dealspace.service - ssh $(REMOTE) 'systemctl daemon-reload && systemctl enable dealspace' - @echo "Service installed" + scp deploy/dealspace.service $(SHANNON):/etc/systemd/system/ + scp deploy/backup.sh $(SHANNON):$(DEPLOY_PATH)/ + scp 
deploy/healthcheck.sh $(SHANNON):$(DEPLOY_PATH)/ + ssh $(SHANNON) "chmod +x $(DEPLOY_PATH)/backup.sh $(DEPLOY_PATH)/healthcheck.sh && systemctl daemon-reload && systemctl enable dealspace" + @echo "Service installed ✓" + +logs: + ssh $(SHANNON) "journalctl -u dealspace -f --no-pager" + +ssh: + ssh $(SHANNON) + +health: + curl -s https://muskepo.com/health | python3 -m json.tool diff --git a/README.md b/README.md new file mode 100644 index 0000000..a206004 --- /dev/null +++ b/README.md @@ -0,0 +1,210 @@ +# Dealspace + +M&A deal management platform for investment banks, sellers, and buyers. + +## What is Dealspace? + +A workflow platform where M&A deals are managed through a structured request-and-answer system. Investment banks issue request lists, sellers provide answers with supporting documents, and buyers access a data room with vetted information. + +**Not** a document repository with features bolted on. Designed from first principles around the core primitive: the **Request**. + +## Architecture + +``` +Internet + │ + ▼ +┌─────────┐ +│ Caddy │ (TLS termination, reverse proxy) +└────┬────┘ + │ :8080 + ▼ +┌─────────────┐ +│ Dealspace │ (Go binary, single process) +│ │ +│ ┌───────┐ │ +│ │SQLite │ │ (FTS5, encrypted at rest) +│ │ + WAL │ │ +│ └───────┘ │ +│ ┌───────┐ │ +│ │ Store │ │ (Encrypted object storage) +│ └───────┘ │ +└─────────────┘ +``` + +**Key decisions:** +- SQLite with FTS5 for full-text search +- All sensitive data encrypted with AES-256-GCM +- Blind indexes (HMAC-SHA256) for searchable encrypted fields +- Per-request watermarking on document downloads +- Zero external database dependencies + +## Quick Start + +### Development + +```bash +# Clone +git clone git@zurich.inou.com:dealspace.git +cd dealspace + +# Build +make build + +# Run locally +make run +``` + +### Production Deployment + +```bash +# First time: install service on Shannon +ssh root@82.24.174.112 +cd /tmp +scp -r yourhost:/path/to/dealspace/deploy . 
+cd deploy +./install.sh + +# Deploy updates (from dev machine) +make deploy + +# View logs +make logs +``` + +## Environment Variables + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `MASTER_KEY` | **Yes** | — | 32-byte hex key for encryption. **Never change after data exists.** | +| `DB_PATH` | No | `./dealspace.db` | SQLite database path | +| `STORE_PATH` | No | `./store` | Object storage directory | +| `PORT` | No | `8080` | HTTP listen port | +| `ENV` | No | `development` | `development` or `production` | +| `SESSION_TTL_HOURS` | No | `1` | Session token TTL | +| `REFRESH_TTL_DAYS` | No | `7` | Refresh token TTL | +| `SMTP_HOST` | No | — | SMTP server for email | +| `SMTP_PORT` | No | `587` | SMTP port | +| `SMTP_USER` | No | — | SMTP username | +| `SMTP_PASS` | No | — | SMTP password | +| `SMTP_FROM` | No | — | From address for emails | +| `FIREWORKS_API_KEY` | No | — | Fireworks AI API key for embeddings | +| `NTFY_URL` | No | — | ntfy URL for alerts | +| `NTFY_TOKEN` | No | — | ntfy auth token | + +See `deploy/env.template` for a complete example. + +## Development + +### Prerequisites + +- Go 1.22+ +- SQLite3 with FTS5 support +- CGO enabled (required for SQLite) + +### Building + +```bash +# Development build +make build + +# Linux production build (cross-compile) +make build-linux + +# Run tests +make test + +# Clean build artifacts +make clean +``` + +### Testing + +```bash +# Run all tests +make test + +# Run with verbose output +CGO_ENABLED=1 go test -tags fts5 ./... -v +``` + +### Project Structure + +``` +dealspace/ +├── cmd/server/ # Entry point, config loading +├── lib/ # Core business logic +│ ├── types.go # All shared types +│ ├── dbcore.go # EntryRead/Write/Delete (the single throat) +│ ├── rbac.go # Access control +│ ├── crypto.go # Encryption, blind indexes +│ ├── store.go # Object storage +│ └── ... 
+├── api/ # HTTP handlers (thin layer) +├── portal/ # HTML templates, static assets +├── mcp/ # MCP server for AI tools +├── migrations/ # SQL migration files +├── deploy/ # Deployment scripts +└── website/ # Public marketing site +``` + +## Operations + +### Backup + +Daily backups run automatically at 3 AM via cron. Backups are: +- Hot SQLite backups (safe with WAL) +- Compressed with gzip +- Retained for 30 days +- Stored in `/opt/dealspace/backups/` + +Manual backup: +```bash +/opt/dealspace/backup.sh +``` + +### Monitoring + +Health checks run every 5 minutes. If the service is down, an alert is sent to ntfy. + +Check health manually: +```bash +curl http://localhost:8080/health +# or externally: +curl https://muskepo.com/health +``` + +### Logs + +```bash +# Follow live logs +journalctl -u dealspace -f + +# Last 100 lines +journalctl -u dealspace -n 100 + +# Since specific time +journalctl -u dealspace --since "1 hour ago" +``` + +### Service Management + +```bash +systemctl status dealspace +systemctl start dealspace +systemctl stop dealspace +systemctl restart dealspace +``` + +## Security + +- All content encrypted with AES-256-GCM (BoringCrypto for FIPS 140-3) +- Blind indexes for searchable encrypted fields +- MFA required for IB admin/member roles +- Dynamic watermarking on all document downloads +- Comprehensive audit logging +- Session management with single active session per user + +## License + +Proprietary. All rights reserved. diff --git a/api/handlers.go b/api/handlers.go index e40ab94..a5eee0a 100644 --- a/api/handlers.go +++ b/api/handlers.go @@ -1,11 +1,21 @@ package api import ( + "crypto/rand" + "encoding/hex" "encoding/json" + "html/template" + "io" "net/http" + "os" + "path/filepath" + "strings" + "time" "github.com/go-chi/chi/v5" + "github.com/google/uuid" "github.com/mish/dealspace/lib" + "golang.org/x/crypto/bcrypt" ) // Handlers holds dependencies for HTTP handlers. 
@@ -195,3 +205,541 @@ func (h *Handlers) GetMyTasks(w http.ResponseWriter, r *http.Request) { JSONResponse(w, http.StatusOK, entries) } + +// --------------------------------------------------------------------------- +// Auth API endpoints +// --------------------------------------------------------------------------- + +// Login handles POST /api/auth/login +func (h *Handlers) Login(w http.ResponseWriter, r *http.Request) { + var req struct { + Email string `json:"email"` + Password string `json:"password"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body") + return + } + + req.Email = strings.TrimSpace(strings.ToLower(req.Email)) + if req.Email == "" || req.Password == "" { + ErrorResponse(w, http.StatusBadRequest, "missing_fields", "Email and password required") + return + } + + user, err := lib.UserByEmail(h.DB, req.Email) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Login failed") + return + } + if user == nil { + ErrorResponse(w, http.StatusUnauthorized, "invalid_credentials", "Invalid email or password") + return + } + + if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(req.Password)); err != nil { + ErrorResponse(w, http.StatusUnauthorized, "invalid_credentials", "Invalid email or password") + return + } + + // Revoke existing sessions + _ = lib.SessionRevokeAllForUser(h.DB, user.UserID) + + // Create session + sessionID := generateToken() + now := time.Now().UnixMilli() + session := &lib.Session{ + ID: sessionID, + UserID: user.UserID, + Fingerprint: r.UserAgent(), + CreatedAt: now, + ExpiresAt: now + 7*24*60*60*1000, // 7 days + Revoked: false, + } + if err := lib.SessionCreate(h.DB, session); err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to create session") + return + } + + // Create JWT (1 hour) + token, err := createJWT(user.UserID, sessionID, h.Cfg.JWTSecret, 
3600) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to create token") + return + } + + JSONResponse(w, http.StatusOK, map[string]any{ + "token": token, + "user": map[string]string{ + "id": user.UserID, + "name": user.Name, + "email": user.Email, + "role": "ib_admin", // simplified for now + }, + }) +} + +// Logout handles POST /api/auth/logout +func (h *Handlers) Logout(w http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + if strings.HasPrefix(auth, "Bearer ") { + token := strings.TrimPrefix(auth, "Bearer ") + claims, err := validateJWT(token, h.Cfg.JWTSecret) + if err == nil { + _ = lib.SessionRevoke(h.DB, claims.SessionID) + } + } + JSONResponse(w, http.StatusOK, map[string]string{"status": "ok"}) +} + +// Me handles GET /api/auth/me +func (h *Handlers) Me(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + user, err := lib.UserByID(h.DB, actorID) + if err != nil || user == nil { + ErrorResponse(w, http.StatusUnauthorized, "invalid_session", "User not found") + return + } + + JSONResponse(w, http.StatusOK, map[string]string{ + "id": user.UserID, + "name": user.Name, + "email": user.Email, + }) +} + +// Setup handles POST /api/setup (first-run admin creation) +func (h *Handlers) Setup(w http.ResponseWriter, r *http.Request) { + count, err := lib.UserCount(h.DB) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to check users") + return + } + if count > 0 { + ErrorResponse(w, http.StatusForbidden, "setup_complete", "Setup already completed") + return + } + + var req struct { + Name string `json:"name"` + Email string `json:"email"` + Password string `json:"password"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body") + return + } + + req.Email = strings.TrimSpace(strings.ToLower(req.Email)) + if req.Name == "" || req.Email 
== "" || req.Password == "" { + ErrorResponse(w, http.StatusBadRequest, "missing_fields", "Name, email, and password required") + return + } + if len(req.Password) < 8 { + ErrorResponse(w, http.StatusBadRequest, "weak_password", "Password must be at least 8 characters") + return + } + + hashed, err := bcrypt.GenerateFromPassword([]byte(req.Password), bcrypt.DefaultCost) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to hash password") + return + } + + now := time.Now().UnixMilli() + user := &lib.User{ + UserID: uuid.New().String(), + Email: req.Email, + Name: req.Name, + Password: string(hashed), + OrgID: "admin", + OrgName: "Dealspace", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + + if err := lib.UserCreate(h.DB, user); err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to create user") + return + } + + JSONResponse(w, http.StatusCreated, map[string]string{ + "status": "ok", + "user_id": user.UserID, + "message": "Admin account created. 
You can now log in.", + }) +} + +// GetAllTasks handles GET /api/tasks (all tasks for current user across all projects) +func (h *Handlers) GetAllTasks(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + entries, err := lib.TasksByUser(h.DB, h.Cfg, actorID) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to read tasks") + return + } + if entries == nil { + entries = []lib.Entry{} + } + JSONResponse(w, http.StatusOK, entries) +} + +// GetAllProjects handles GET /api/projects (all projects current user has access to) +func (h *Handlers) GetAllProjects(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + entries, err := lib.ProjectsByUser(h.DB, h.Cfg, actorID) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to read projects") + return + } + if entries == nil { + entries = []lib.Entry{} + } + JSONResponse(w, http.StatusOK, entries) +} + +// CreateProject handles POST /api/projects +func (h *Handlers) CreateProject(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + + var req struct { + Name string `json:"name"` + DealType string `json:"deal_type"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body") + return + } + if req.Name == "" { + ErrorResponse(w, http.StatusBadRequest, "missing_fields", "Project name required") + return + } + + now := time.Now().UnixMilli() + projectID := uuid.New().String() + dataJSON := `{"name":"` + req.Name + `","deal_type":"` + req.DealType + `","status":"active"}` + + entry := &lib.Entry{ + ProjectID: projectID, + Type: lib.TypeProject, + Depth: 0, + SummaryText: req.Name, + DataText: dataJSON, + Stage: lib.StagePreDataroom, + } + entry.EntryID = projectID + entry.CreatedBy = actorID + entry.CreatedAt = now + entry.UpdatedAt = now + entry.Version = 1 + 
entry.KeyVersion = 1 + + // Pack encrypted fields + key, err := lib.DeriveProjectKey(h.Cfg.MasterKey, projectID) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Key derivation failed") + return + } + summary, err := lib.Pack(key, entry.SummaryText) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Encryption failed") + return + } + data, err := lib.Pack(key, entry.DataText) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Encryption failed") + return + } + entry.Summary = summary + entry.Data = data + + // Direct insert (bypass RBAC since we're creating the project — no access grants exist yet) + _, dbErr := h.DB.Conn.Exec( + `INSERT INTO entries (entry_id, project_id, parent_id, type, depth, + search_key, search_key2, summary, data, stage, + assignee_id, return_to_id, origin_id, + version, deleted_at, deleted_by, key_version, + created_at, updated_at, created_by) + VALUES (?,?,?,?,?, ?,?,?,?,?, ?,?,?, ?,?,?,?, ?,?,?)`, + entry.EntryID, entry.ProjectID, "", entry.Type, entry.Depth, + entry.SearchKey, entry.SearchKey2, entry.Summary, entry.Data, entry.Stage, + "", "", "", + entry.Version, nil, nil, entry.KeyVersion, + entry.CreatedAt, entry.UpdatedAt, entry.CreatedBy, + ) + if dbErr != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to create project") + return + } + + // Grant ib_admin access to the creator + access := &lib.Access{ + ID: uuid.New().String(), + ProjectID: projectID, + UserID: actorID, + Role: lib.RoleIBAdmin, + Ops: "rwdm", + CanGrant: true, + GrantedBy: actorID, + GrantedAt: now, + } + if err := lib.AccessGrant(h.DB, access); err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to grant access") + return + } + + JSONResponse(w, http.StatusCreated, map[string]string{ + "project_id": projectID, + "name": req.Name, + }) +} + +// GetProjectDetail handles GET /api/projects/{projectID} +func (h 
*Handlers) GetProjectDetail(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + projectID := chi.URLParam(r, "projectID") + + // Verify access + if err := lib.CheckAccessRead(h.DB, actorID, projectID, ""); err != nil { + ErrorResponse(w, http.StatusForbidden, "access_denied", "Access denied") + return + } + + project, err := lib.EntryByID(h.DB, h.Cfg, projectID) + if err != nil || project == nil { + ErrorResponse(w, http.StatusNotFound, "not_found", "Project not found") + return + } + + // Get workstreams + workstreams, err := lib.EntriesByParent(h.DB, h.Cfg, projectID) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to read workstreams") + return + } + + JSONResponse(w, http.StatusOK, map[string]any{ + "project": project, + "workstreams": workstreams, + }) +} + +// CreateWorkstream handles POST /api/projects/{projectID}/workstreams +func (h *Handlers) CreateWorkstream(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + projectID := chi.URLParam(r, "projectID") + + if err := lib.CheckAccessWrite(h.DB, actorID, projectID, ""); err != nil { + ErrorResponse(w, http.StatusForbidden, "access_denied", "Access denied") + return + } + + var req struct { + Name string `json:"name"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body") + return + } + if req.Name == "" { + ErrorResponse(w, http.StatusBadRequest, "missing_fields", "Name required") + return + } + + entry := &lib.Entry{ + ProjectID: projectID, + ParentID: projectID, + Type: lib.TypeWorkstream, + Depth: 1, + SummaryText: req.Name, + DataText: `{"name":"` + req.Name + `"}`, + Stage: lib.StagePreDataroom, + } + + if err := lib.EntryWrite(h.DB, h.Cfg, actorID, entry); err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to create workstream") + return + } + + JSONResponse(w, 
http.StatusCreated, entry) +} + +// UploadObject handles POST /api/projects/{projectID}/objects (file upload) +func (h *Handlers) UploadObject(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + projectID := chi.URLParam(r, "projectID") + + if err := lib.CheckAccessWrite(h.DB, actorID, projectID, ""); err != nil { + ErrorResponse(w, http.StatusForbidden, "access_denied", "Access denied") + return + } + + r.Body = http.MaxBytesReader(w, r.Body, 50<<20) // 50MB max + if err := r.ParseMultipartForm(50 << 20); err != nil { + ErrorResponse(w, http.StatusBadRequest, "file_too_large", "File too large (max 50MB)") + return + } + + file, header, err := r.FormFile("file") + if err != nil { + ErrorResponse(w, http.StatusBadRequest, "missing_file", "No file provided") + return + } + defer file.Close() + + data, err := io.ReadAll(file) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to read file") + return + } + + objectID, err := h.Store.Write(projectID, data) + if err != nil { + ErrorResponse(w, http.StatusInternalServerError, "internal", "Failed to store file") + return + } + + JSONResponse(w, http.StatusCreated, map[string]string{ + "object_id": objectID, + "filename": header.Filename, + "size": json.Number(strings.TrimRight(strings.TrimRight(json.Number("0").String(), "0"), ".")).String(), + }) +} + +// DownloadObject handles GET /api/projects/{projectID}/objects/{objectID} +func (h *Handlers) DownloadObject(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + projectID := chi.URLParam(r, "projectID") + objectID := chi.URLParam(r, "objectID") + + if err := lib.CheckAccessRead(h.DB, actorID, projectID, ""); err != nil { + ErrorResponse(w, http.StatusForbidden, "access_denied", "Access denied") + return + } + + data, err := h.Store.Read(projectID, objectID) + if err != nil { + ErrorResponse(w, http.StatusNotFound, "not_found", "Object not found") + return + } + + 
user, _ := lib.UserByID(h.DB, actorID) + userName := "Unknown" + if user != nil { + userName = user.Name + } + + // Add watermark header for PDFs + filename := r.URL.Query().Get("filename") + if filename == "" { + filename = objectID + } + + w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"") + w.Header().Set("X-Watermark", userName+" - "+time.Now().Format("2006-01-02 15:04")+" - CONFIDENTIAL") + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(data) +} + +// --------------------------------------------------------------------------- +// Template serving handlers +// --------------------------------------------------------------------------- + +func (h *Handlers) serveTemplate(w http.ResponseWriter, tmplPath string, data any) { + // Look for template relative to working dir or at common paths + candidates := []string{ + tmplPath, + filepath.Join("portal/templates", tmplPath), + filepath.Join("/opt/dealspace/portal/templates", tmplPath), + } + + var tmpl *template.Template + var err error + for _, p := range candidates { + if _, statErr := os.Stat(p); statErr == nil { + tmpl, err = template.ParseFiles(p) + if err == nil { + break + } + } + } + + if tmpl == nil { + http.Error(w, "Template not found: "+tmplPath, http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "text/html; charset=utf-8") + tmpl.Execute(w, data) +} + +// ServeLogin serves the login page +func (h *Handlers) ServeLogin(w http.ResponseWriter, r *http.Request) { + h.serveTemplate(w, "auth/login.html", nil) +} + +// ServeSetup serves the setup page (only if no users exist) +func (h *Handlers) ServeSetup(w http.ResponseWriter, r *http.Request) { + count, _ := lib.UserCount(h.DB) + if count > 0 { + http.Redirect(w, r, "/app/login", http.StatusFound) + return + } + h.serveTemplate(w, "auth/setup.html", nil) +} + +// ServeAppTasks serves the tasks page +func (h *Handlers) ServeAppTasks(w http.ResponseWriter, r *http.Request) { + 
h.serveTemplate(w, "app/tasks.html", nil) +} + +// ServeAppProjects serves the projects page +func (h *Handlers) ServeAppProjects(w http.ResponseWriter, r *http.Request) { + h.serveTemplate(w, "app/projects.html", nil) +} + +// ServeAppProject serves a single project page +func (h *Handlers) ServeAppProject(w http.ResponseWriter, r *http.Request) { + h.serveTemplate(w, "app/project.html", nil) +} + +// ServeAppRequest serves a request detail page +func (h *Handlers) ServeAppRequest(w http.ResponseWriter, r *http.Request) { + h.serveTemplate(w, "app/request.html", nil) +} + +// GetRequestDetail handles GET /api/requests/{requestID} +func (h *Handlers) GetRequestDetail(w http.ResponseWriter, r *http.Request) { + actorID := UserIDFromContext(r.Context()) + requestID := chi.URLParam(r, "requestID") + + entry, err := lib.EntryByID(h.DB, h.Cfg, requestID) + if err != nil || entry == nil { + ErrorResponse(w, http.StatusNotFound, "not_found", "Request not found") + return + } + + // Check access + if err := lib.CheckAccessRead(h.DB, actorID, entry.ProjectID, ""); err != nil { + ErrorResponse(w, http.StatusForbidden, "access_denied", "Access denied") + return + } + + // Get children (answers, comments) + children, err := lib.EntriesByParent(h.DB, h.Cfg, requestID) + if err != nil { + children = []lib.Entry{} + } + + JSONResponse(w, http.StatusOK, map[string]any{ + "request": entry, + "children": children, + }) +} + +func generateToken() string { + b := make([]byte, 32) + rand.Read(b) + return hex.EncodeToString(b) +} diff --git a/api/middleware.go b/api/middleware.go index 25f9063..11b5d6c 100644 --- a/api/middleware.go +++ b/api/middleware.go @@ -187,6 +187,31 @@ type jwtClaims struct { IssuedAt int64 `json:"iat"` } +// createJWT creates a signed JWT with the given claims. 
+func createJWT(userID, sessionID string, secret []byte, duration int64) (string, error) { + now := time.Now().Unix() + claims := jwtClaims{ + UserID: userID, + SessionID: sessionID, + ExpiresAt: now + duration, + IssuedAt: now, + } + + header := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"HS256","typ":"JWT"}`)) + payloadJSON, err := json.Marshal(claims) + if err != nil { + return "", err + } + payload := base64.RawURLEncoding.EncodeToString(payloadJSON) + + signingInput := header + "." + payload + mac := hmac.New(sha256.New, secret) + mac.Write([]byte(signingInput)) + sig := base64.RawURLEncoding.EncodeToString(mac.Sum(nil)) + + return signingInput + "." + sig, nil +} + func validateJWT(token string, secret []byte) (*jwtClaims, error) { parts := strings.Split(token, ".") if len(parts) != 3 { diff --git a/api/middleware_test.go b/api/middleware_test.go new file mode 100644 index 0000000..8d71ec2 --- /dev/null +++ b/api/middleware_test.go @@ -0,0 +1,416 @@ +package api + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/google/uuid" + "github.com/mish/dealspace/lib" +) + +func testDBSetup(t *testing.T) (*lib.DB, *lib.Config) { + t.Helper() + + tmpFile, err := os.CreateTemp("", "dealspace-api-test-*.db") + if err != nil { + t.Fatalf("create temp file: %v", err) + } + tmpFile.Close() + t.Cleanup(func() { os.Remove(tmpFile.Name()) }) + + db, err := lib.OpenDB(tmpFile.Name(), "../migrations/001_initial.sql") + if err != nil { + t.Fatalf("OpenDB: %v", err) + } + t.Cleanup(func() { db.Close() }) + + masterKey := make([]byte, 32) + jwtSecret := []byte("test-jwt-secret-32-bytes-long!!") + + cfg := &lib.Config{ + MasterKey: masterKey, + JWTSecret: jwtSecret, + } + + return db, cfg +} + +func createTestUserAndSession(t *testing.T, db *lib.DB, cfg *lib.Config) (*lib.User, *lib.Session) { + t.Helper() + + now := time.Now().UnixMilli() + user := &lib.User{ + 
UserID: uuid.New().String(), + Email: uuid.New().String() + "@test.com", + Name: "Test User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + if err := lib.UserCreate(db, user); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + session := &lib.Session{ + ID: uuid.New().String(), + UserID: user.UserID, + Fingerprint: "test-fingerprint", + CreatedAt: now, + ExpiresAt: now + 86400000, // +1 day + Revoked: false, + } + if err := lib.SessionCreate(db, session); err != nil { + t.Fatalf("SessionCreate: %v", err) + } + + return user, session +} + +func createJWT(userID, sessionID string, expiresAt int64, secret []byte) string { + header := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"HS256","typ":"JWT"}`)) + + claims := map[string]interface{}{ + "sub": userID, + "sid": sessionID, + "exp": expiresAt, + "iat": time.Now().Unix(), + } + claimsJSON, _ := json.Marshal(claims) + payload := base64.RawURLEncoding.EncodeToString(claimsJSON) + + signingInput := header + "." + payload + mac := hmac.New(sha256.New, secret) + mac.Write([]byte(signingInput)) + signature := base64.RawURLEncoding.EncodeToString(mac.Sum(nil)) + + return header + "." + payload + "." 
+ signature +} + +func TestAuthMiddleware_ValidToken(t *testing.T) { + db, cfg := testDBSetup(t) + user, session := createTestUserAndSession(t, db, cfg) + + // Create valid JWT + token := createJWT(user.UserID, session.ID, time.Now().Unix()+3600, cfg.JWTSecret) + + // Create test handler that checks user ID + var capturedUserID string + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedUserID = UserIDFromContext(r.Context()) + w.WriteHeader(http.StatusOK) + }) + + // Wrap with auth middleware + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + req := httptest.NewRequest("GET", "/api/test", nil) + req.Header.Set("Authorization", "Bearer "+token) + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("expected 200, got %d", rec.Code) + } + if capturedUserID != user.UserID { + t.Errorf("user ID not set correctly: got %s, want %s", capturedUserID, user.UserID) + } +} + +func TestAuthMiddleware_NoToken(t *testing.T) { + db, cfg := testDBSetup(t) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + req := httptest.NewRequest("GET", "/api/test", nil) + // No Authorization header + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("expected 401, got %d", rec.Code) + } +} + +func TestAuthMiddleware_ExpiredToken(t *testing.T) { + db, cfg := testDBSetup(t) + user, session := createTestUserAndSession(t, db, cfg) + + // Create expired JWT (expired 1 hour ago) + token := createJWT(user.UserID, session.ID, time.Now().Unix()-3600, cfg.JWTSecret) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + req := httptest.NewRequest("GET", "/api/test", nil) + 
req.Header.Set("Authorization", "Bearer "+token) + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("expected 401 for expired token, got %d", rec.Code) + } +} + +func TestAuthMiddleware_InvalidToken(t *testing.T) { + db, cfg := testDBSetup(t) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + tests := []struct { + name string + token string + }{ + {"garbage", "not-a-jwt"}, + {"malformed", "a.b.c.d.e"}, + {"wrong signature", createJWT("user", "session", time.Now().Unix()+3600, []byte("wrong-secret"))}, + {"empty bearer", ""}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req := httptest.NewRequest("GET", "/api/test", nil) + if tc.token != "" { + req.Header.Set("Authorization", "Bearer "+tc.token) + } else { + req.Header.Set("Authorization", "Bearer ") + } + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("expected 401, got %d", rec.Code) + } + }) + } +} + +func TestAuthMiddleware_RevokedSession(t *testing.T) { + db, cfg := testDBSetup(t) + user, session := createTestUserAndSession(t, db, cfg) + + // Create valid JWT + token := createJWT(user.UserID, session.ID, time.Now().Unix()+3600, cfg.JWTSecret) + + // Revoke the session + if err := lib.SessionRevoke(db, session.ID); err != nil { + t.Fatalf("SessionRevoke: %v", err) + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + req := httptest.NewRequest("GET", "/api/test", nil) + req.Header.Set("Authorization", "Bearer "+token) + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("expected 401 for revoked session, got %d", rec.Code) + } +} + 
+func TestAuthMiddleware_ExpiredSession(t *testing.T) { + db, cfg := testDBSetup(t) + + now := time.Now().UnixMilli() + user := &lib.User{ + UserID: uuid.New().String(), + Email: uuid.New().String() + "@test.com", + Name: "Test User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + lib.UserCreate(db, user) + + // Create session that's already expired + session := &lib.Session{ + ID: uuid.New().String(), + UserID: user.UserID, + Fingerprint: "test-fingerprint", + CreatedAt: now - 86400000*2, // 2 days ago + ExpiresAt: now - 86400000, // expired 1 day ago + Revoked: false, + } + lib.SessionCreate(db, session) + + // Create JWT that hasn't expired (but session has) + token := createJWT(user.UserID, session.ID, time.Now().Unix()+3600, cfg.JWTSecret) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := AuthMiddleware(db, cfg.JWTSecret)(handler) + + req := httptest.NewRequest("GET", "/api/test", nil) + req.Header.Set("Authorization", "Bearer "+token) + rec := httptest.NewRecorder() + + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusUnauthorized { + t.Errorf("expected 401 for expired session, got %d", rec.Code) + } +} + +func TestCORSMiddleware(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + wrapped := CORSMiddleware(handler) + + // Regular request + req := httptest.NewRequest("GET", "/api/test", nil) + rec := httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Header().Get("Access-Control-Allow-Origin") != "*" { + t.Error("CORS header not set") + } + + // Preflight request + req = httptest.NewRequest("OPTIONS", "/api/test", nil) + rec = httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusNoContent { + t.Errorf("OPTIONS should return 204, got %d", rec.Code) + } + if rec.Header().Get("Access-Control-Allow-Methods") == "" { + 
t.Error("Allow-Methods header not set") + } +} + +func TestLoggingMiddleware(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) + }) + + wrapped := LoggingMiddleware(handler) + + req := httptest.NewRequest("POST", "/api/test", nil) + rec := httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusCreated { + t.Errorf("expected 201, got %d", rec.Code) + } +} + +func TestRateLimitMiddleware(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + // Very low limit for testing + wrapped := RateLimitMiddleware(3)(handler) + + // First 3 requests should succeed + for i := 0; i < 3; i++ { + req := httptest.NewRequest("GET", "/api/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rec := httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("request %d should succeed, got %d", i+1, rec.Code) + } + } + + // 4th request should be rate limited + req := httptest.NewRequest("GET", "/api/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rec := httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusTooManyRequests { + t.Errorf("4th request should be rate limited, got %d", rec.Code) + } + + // Different IP should succeed + req = httptest.NewRequest("GET", "/api/test", nil) + req.RemoteAddr = "192.168.1.2:12345" + rec = httptest.NewRecorder() + wrapped.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("different IP should succeed, got %d", rec.Code) + } +} + +func TestErrorResponse(t *testing.T) { + rec := httptest.NewRecorder() + ErrorResponse(rec, http.StatusBadRequest, "bad_request", "Invalid input") + + if rec.Code != http.StatusBadRequest { + t.Errorf("expected 400, got %d", rec.Code) + } + + var resp map[string]string + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decode 
response: %v", err) + } + + if resp["code"] != "bad_request" { + t.Errorf("expected code 'bad_request', got %s", resp["code"]) + } + if resp["error"] != "Invalid input" { + t.Errorf("expected error 'Invalid input', got %s", resp["error"]) + } +} + +func TestJSONResponse(t *testing.T) { + rec := httptest.NewRecorder() + data := map[string]interface{}{ + "id": 123, + "name": "test", + } + JSONResponse(rec, http.StatusOK, data) + + if rec.Code != http.StatusOK { + t.Errorf("expected 200, got %d", rec.Code) + } + + if rec.Header().Get("Content-Type") != "application/json" { + t.Error("Content-Type should be application/json") + } + + var resp map[string]interface{} + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("decode response: %v", err) + } + + if resp["name"] != "test" { + t.Error("response data incorrect") + } +} diff --git a/deploy/backup.sh b/deploy/backup.sh new file mode 100755 index 0000000..7dfc877 --- /dev/null +++ b/deploy/backup.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Dealspace Backup Script +# Runs daily via cron, keeps last 30 backups + +set -e + +BACKUP_DIR=/opt/dealspace/backups +DB_PATH=/opt/dealspace/data/dealspace.db +LOG_FILE=/opt/dealspace/logs/backup.log +NTFY_URL="https://ntfy.inou.com/inou-alerts" +NTFY_TOKEN="tk_k120jegay3lugeqbr9fmpuxdqmzx5" +RETENTION_DAYS=30 + +TIMESTAMP=$(date +%Y%m%d-%H%M%S) +BACKUP_FILE="dealspace-${TIMESTAMP}.db" +BACKUP_PATH="${BACKUP_DIR}/${BACKUP_FILE}" + +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" +} + +alert() { + curl -sf "${NTFY_URL}" \ + -H "Authorization: Bearer ${NTFY_TOKEN}" \ + -H "Title: Dealspace Backup FAILED" \ + -H "Priority: high" \ + -H "Tags: warning" \ + -d "$1" || true +} + +log "Starting backup..." + +# Check if database exists +if [ ! -f "$DB_PATH" ]; then + MSG="Database not found: $DB_PATH" + log "ERROR: $MSG" + alert "$MSG" + exit 1 +fi + +# Perform SQLite backup (hot backup, safe with WAL) +if ! 
sqlite3 "$DB_PATH" ".backup '$BACKUP_PATH'"; then + MSG="SQLite backup command failed" + log "ERROR: $MSG" + alert "$MSG" + exit 1 +fi + +# Verify backup was created +if [ ! -f "$BACKUP_PATH" ]; then + MSG="Backup file not created: $BACKUP_PATH" + log "ERROR: $MSG" + alert "$MSG" + exit 1 +fi + +# Compress backup +if ! gzip "$BACKUP_PATH"; then + MSG="Failed to compress backup" + log "ERROR: $MSG" + alert "$MSG" + exit 1 +fi + +BACKUP_SIZE=$(du -h "${BACKUP_PATH}.gz" | cut -f1) +log "Backup created: ${BACKUP_FILE}.gz ($BACKUP_SIZE)" + +# Clean up old backups (keep last 30) +OLD_COUNT=$(find "$BACKUP_DIR" -name "dealspace-*.db.gz" -type f -mtime +$RETENTION_DAYS | wc -l) +if [ "$OLD_COUNT" -gt 0 ]; then + find "$BACKUP_DIR" -name "dealspace-*.db.gz" -type f -mtime +$RETENTION_DAYS -delete + log "Deleted $OLD_COUNT old backups (older than $RETENTION_DAYS days)" +fi + +# Count remaining backups +BACKUP_COUNT=$(find "$BACKUP_DIR" -name "dealspace-*.db.gz" -type f | wc -l) +log "Backup complete. $BACKUP_COUNT backups retained." 
diff --git a/deploy/dealspace.service b/deploy/dealspace.service new file mode 100644 index 0000000..727cf00 --- /dev/null +++ b/deploy/dealspace.service @@ -0,0 +1,24 @@ +[Unit] +Description=Dealspace M&A Platform +After=network.target +StartLimitIntervalSec=0 + +[Service] +Type=simple +User=johan +WorkingDirectory=/opt/dealspace +EnvironmentFile=/opt/dealspace/.env +ExecStart=/opt/dealspace/bin/dealspace +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=dealspace +# Security hardening +NoNewPrivileges=yes +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/opt/dealspace/data /opt/dealspace/store /opt/dealspace/logs + +[Install] +WantedBy=multi-user.target diff --git a/deploy/env.template b/deploy/env.template new file mode 100644 index 0000000..8791cc0 --- /dev/null +++ b/deploy/env.template @@ -0,0 +1,63 @@ +# Dealspace Environment Configuration +# Copy to /opt/dealspace/.env and fill in values + +# ============================================================================= +# Core +# ============================================================================= + +# 32 bytes hex — NEVER CHANGE after data is written! 
+# Generate with: openssl rand -hex 32 +MASTER_KEY= + +# Database path (SQLite with FTS5) +DB_PATH=/opt/dealspace/data/dealspace.db + +# Object store path (encrypted files) +STORE_PATH=/opt/dealspace/store + +# HTTP port +PORT=8080 + +# Environment: development | production +ENV=production + +# ============================================================================= +# Auth +# ============================================================================= + +# Session token TTL (hours) +SESSION_TTL_HOURS=1 + +# Refresh token TTL (days) +REFRESH_TTL_DAYS=7 + +# ============================================================================= +# Seeding +# ============================================================================= + +# Set to true on first run to seed demo data, then remove +SEED_DEMO=false + +# ============================================================================= +# Email (Stalwart SMTP at mail.jongsma.me) +# ============================================================================= + +SMTP_HOST=mail.jongsma.me +SMTP_PORT=587 +SMTP_USER= +SMTP_PASS= +SMTP_FROM=noreply@muskepo.com + +# ============================================================================= +# AI (Fireworks — zero retention, FIPS compliant) +# ============================================================================= + +FIREWORKS_API_KEY= + +# ============================================================================= +# Monitoring +# ============================================================================= + +# ntfy notifications for alerts +NTFY_URL=https://ntfy.inou.com/inou-alerts +NTFY_TOKEN=tk_k120jegay3lugeqbr9fmpuxdqmzx5 diff --git a/deploy/healthcheck.sh b/deploy/healthcheck.sh new file mode 100755 index 0000000..c1735b3 --- /dev/null +++ b/deploy/healthcheck.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Dealspace Healthcheck Script +# Runs every 5 minutes via cron, alerts via ntfy if down + +HEALTH=$(curl -sf --max-time 5 http://localhost:8080/health | 
python3 -c "import sys,json; print(json.load(sys.stdin)['status'])" 2>/dev/null) + +if [ "$HEALTH" != "ok" ]; then + curl -s https://ntfy.inou.com/inou-alerts \ + -H "Authorization: Bearer tk_k120jegay3lugeqbr9fmpuxdqmzx5" \ + -H "Title: Dealspace DOWN" \ + -H "Priority: urgent" \ + -H "Tags: warning" \ + -d "Dealspace health check failed on Shannon (82.24.174.112)" +fi diff --git a/deploy/install.sh b/deploy/install.sh new file mode 100755 index 0000000..05bbbf3 --- /dev/null +++ b/deploy/install.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Dealspace Installation Script +# Run as root on Shannon (82.24.174.112) + +set -e + +INSTALL_DIR=/opt/dealspace +SERVICE_USER=johan + +echo "=== Dealspace Installation ===" + +# Create user if missing +if ! id "$SERVICE_USER" &>/dev/null; then + echo "Creating user $SERVICE_USER..." + useradd -r -s /bin/bash -d /home/$SERVICE_USER -m $SERVICE_USER +fi + +# Create directory structure +echo "Creating directories..." +mkdir -p $INSTALL_DIR/{bin,data,store,logs,backups,migrations} + +# Generate MASTER_KEY if .env doesn't exist +if [ ! -f "$INSTALL_DIR/.env" ]; then + echo "Creating .env with new MASTER_KEY..." + MASTER_KEY=$(openssl rand -hex 32) + cat > $INSTALL_DIR/.env < $CRON_TMP <> $INSTALL_DIR/logs/backup.log 2>&1 + +# Dealspace healthcheck - every 5 minutes +*/5 * * * * $INSTALL_DIR/healthcheck.sh +CRONEOF + +crontab -u $SERVICE_USER $CRON_TMP +rm $CRON_TMP + +echo "" +echo "=== Installation Complete ===" +echo "" +echo "Next steps:" +echo " 1. Edit $INSTALL_DIR/.env with SMTP and Fireworks credentials" +echo " 2. Deploy the binary: make deploy" +echo " 3. Start the service: systemctl start dealspace" +echo " 4. Check status: systemctl status dealspace" +echo " 5. 
View logs: journalctl -u dealspace -f" +echo "" +echo "Cron jobs installed:" +echo " - Daily backup at 3 AM" +echo " - Healthcheck every 5 minutes" diff --git a/docs/soc2/data-retention-policy.md b/docs/soc2/data-retention-policy.md new file mode 100644 index 0000000..1c6b25e --- /dev/null +++ b/docs/soc2/data-retention-policy.md @@ -0,0 +1,189 @@ +# Data Retention Policy + +**Version:** 1.0 +**Effective:** February 2026 +**Owner:** Johan Jongsma +**Review:** Annually + +--- + +## 1. Purpose + +Define how long Dealspace retains client data and the procedures for data deletion. + +--- + +## 2. Scope + +All data stored in Dealspace systems: +- Projects and deals +- Deal data (requests, responses, documents) +- Participant accounts and access grants +- Access logs +- Authentication tokens + +--- + +## 3. Retention Periods + +### Deal Data + +| Data Type | Retention Period | Rationale | +|-----------|------------------|-----------| +| Active deal data | Per client agreement | Deal lifecycle varies | +| Closed deals | 7 years from close | Regulatory compliance | +| Deleted deals | 30 days (soft delete), then purged | Recovery window | + +### System Data + +| Data Type | Retention Period | Rationale | +|-----------|------------------|-----------| +| HTTP access logs | 90 days | Security investigation window | +| Audit logs | 7 years | Regulatory compliance | +| Error logs | 90 days | Debugging and monitoring | + +### Authentication Data + +| Data Type | Retention Period | Rationale | +|-----------|------------------|-----------| +| Access tokens | 1 hour expiry | Security | +| Refresh tokens | 7 days or until revoked | Session management | +| Invite tokens | 72 hours or until used | Security | + +### Backup Data + +| Data Type | Retention Period | Rationale | +|-----------|------------------|-----------| +| Daily backups | 30 days | Recovery window | + +--- + +## 4. 
Client-Initiated Deletion + +### Project Deletion + +When a client deletes a project: + +**Immediate actions:** +- Mark project as deleted +- Revoke all access grants +- Remove from active listings + +**Within 30 days:** +- Soft delete allows recovery +- After 30 days: permanent purge + +**Retained for compliance:** +- Audit log entries (7 years, anonymized) + +### Individual Entry Deletion + +When a user deletes a specific entry: +- Entry soft-deleted immediately +- Removed from backups per rotation schedule (30 days) + +### Right to Erasure (GDPR Article 17) + +Users may request complete erasure: + +1. User submits request via privacy@muskepo.com +2. Identity verified +3. Deletion executed within 30 days +4. Confirmation sent to user +5. Request logged for compliance + +--- + +## 5. Automated Retention Enforcement + +### Daily Cleanup Jobs + +- Remove expired access tokens +- Remove expired refresh tokens +- Remove expired invite tokens +- Process queued deletions past retention window + +### Log Rotation + +- Rotate logs older than 90 days +- Audit logs retained for 7 years + +### Backup Rotation + +- Daily backups: 30-day retention + +--- + +## 6. Legal Holds + +When litigation or investigation requires data preservation: + +1. **Identify scope** - Which clients/deals affected +2. **Suspend deletion** - Exclude from automated purges +3. **Document hold** - Record reason, scope, authorizer, date +4. **Release hold** - When legal matter resolved, resume normal retention + +**Current legal holds:** None + +--- + +## 7. Data Export + +Clients may export their data at any time: +- Full export available via platform +- Formats: JSON (structured data), original files +- Export includes all project data and audit logs + +--- + +## 8. 
Backup Data Handling + +Deleted data may persist in backups until rotation completes: + +| Backup Type | Maximum Persistence After Deletion | +|-------------|-----------------------------------| +| Daily backups | 30 days | + +Clients are informed that complete purge from all backups occurs within 30 days of deletion request. + +--- + +## 9. Third-Party Data + +### Hostkey (Hosting) + +- Encrypted data only +- Subject to Dealspace's retention policies +- Physical media destroyed per Hostkey procedures + +--- + +## 10. Compliance Mapping + +| Regulation | Requirement | Implementation | +|------------|-------------|----------------| +| GDPR Art. 17 | Right to erasure | 30-day deletion on request | +| GDPR Art. 5(1)(e) | Storage limitation | Defined retention periods | +| FADP | Data minimization | Same as GDPR implementation | +| CCPA | Deletion rights | Same as GDPR implementation | + +--- + +## 11. Verification + +### Monthly Review + +- [ ] Verify cleanup jobs running +- [ ] Check for orphaned data +- [ ] Review pending deletion requests +- [ ] Confirm backup rotation operating + +### Annual Review + +- [ ] Review retention periods for regulatory changes +- [ ] Update policy as needed +- [ ] Verify compliance with stated periods + +--- + +*Document end* diff --git a/docs/soc2/disaster-recovery-plan.md b/docs/soc2/disaster-recovery-plan.md new file mode 100644 index 0000000..b9b2745 --- /dev/null +++ b/docs/soc2/disaster-recovery-plan.md @@ -0,0 +1,300 @@ +# Disaster Recovery Plan + +**Version:** 1.0 +**Effective:** February 2026 +**Owner:** Johan Jongsma +**Review:** Annually +**Last DR Test:** Not yet performed + +--- + +## 1. Purpose + +Define procedures to recover Dealspace services and data following a disaster affecting production systems. + +--- + +## 2. 
Scope + +| System | Location | Criticality | +|--------|----------|-------------| +| Production server | 82.24.174.112 (Zürich) | Critical | +| Database | /opt/dealspace/data/dealspace.db | Critical | +| Master encryption key | Secure storage | Critical | + +--- + +## 3. Recovery Objectives + +| Metric | Target | +|--------|--------| +| **RTO** (Recovery Time Objective) | 4 hours | +| **RPO** (Recovery Point Objective) | 24 hours | + +--- + +## 4. Backup Strategy + +### Backup Inventory + +| Data | Method | Frequency | Retention | Location | +|------|--------|-----------|-----------|----------| +| Database | SQLite backup | Daily | 30 days | Encrypted off-site | +| Master key | Manual copy | On change | Permanent | Separate secure storage | +| Configuration | Git repository | Per change | Permanent | Remote repository | + +### Encryption + +All data is encrypted before leaving the server: +- Database fields: AES-256-GCM encryption with per-project keys +- Off-site backups: Already encrypted +- Master key: Stored separately from data backups + +--- + +## 5. Disaster Scenarios + +### Scenario A: Hardware Failure (Single Component) + +**Symptoms:** Server unresponsive, network failure + +**Recovery:** +1. Contact Hostkey support +2. Restore from backup to new VPS if needed +3. Verify services: health check endpoint +4. Update DNS if IP changed + +**Estimated time:** 2-4 hours + +### Scenario B: Database Corruption + +**Symptoms:** Application errors, SQLite integrity failures + +**Recovery:** + +```bash +# 1. Stop services +ssh root@82.24.174.112 "systemctl stop dealspace" + +# 2. Backup corrupted DB for analysis +ssh root@82.24.174.112 "cp /opt/dealspace/data/dealspace.db /opt/dealspace/data/dealspace.db.corrupted" + +# 3. Restore from backup +# Download latest backup and restore +scp backup-server:/backups/dealspace-latest.db.enc /tmp/ +# Decrypt and place in position + +# 4. Restart services +ssh root@82.24.174.112 "systemctl start dealspace" + +# 5. 
Verify +curl -s https://muskepo.com/health +``` + +**Estimated time:** 1-2 hours + +### Scenario C: Complete Server Loss + +**Symptoms:** Server destroyed, stolen, or unrecoverable + +**Recovery:** + +```bash +# 1. Provision new VPS at Hostkey +# 2. Apply OS hardening (see security-policy.md) + +# 3. Create directory structure +mkdir -p /opt/dealspace/{bin,data} + +# 4. Restore master key from secure storage +# Copy 32-byte key to secure location +chmod 600 /opt/dealspace/master.key + +# 5. Restore database from backup +# Download encrypted backup +# Decrypt and place at /opt/dealspace/data/dealspace.db + +# 6. Deploy application binary +scp dealspace-linux root@NEW_IP:/opt/dealspace/bin/dealspace +chmod +x /opt/dealspace/bin/dealspace + +# 7. Configure systemd service +# 8. Start service +# 9. Update DNS to new IP + +# 10. Verify +curl -s https://muskepo.com/health +``` + +**Estimated time:** 4-8 hours + +### Scenario D: Ransomware/Compromise + +**Symptoms:** Encrypted files, unauthorized access, system tampering + +**Recovery:** +1. **Do not use compromised system** - assume attacker persistence +2. Provision fresh VPS from scratch +3. Restore from known-good backup (before compromise date) +4. Rotate master key and re-encrypt all data +5. Rotate all credentials +6. Apply additional hardening +7. Monitor closely for re-compromise + +**Estimated time:** 8-24 hours + +### Scenario E: Provider/Region Loss + +**Symptoms:** Hostkey Zürich unavailable + +**Recovery:** +1. Provision new VPS at alternate provider +2. Restore from off-site backup +3. Restore master key from secure storage +4. Deploy application +5. Update DNS + +**Estimated time:** 24-48 hours + +--- + +## 6. Key Management + +### Master Key Recovery + +The master key is **critical**. Without it, all encrypted data is permanently unrecoverable. + +**Storage locations:** +1. Production server: Secure location +2. Secure backup: Separate secure storage (not with data backups) + +**Recovery procedure:** +1. 
Retrieve the 32-byte master key from secure storage +2. Create file with proper permissions +3. Verify length (must be exactly 32 bytes) + +### Key Rotation (If Compromised) + +If the master key may be compromised: + +1. Generate new master key +2. Run re-encryption migration (decrypt with old key, re-encrypt with new) +3. Replace key file +4. Update secure storage with new key +5. Verify application functionality + +--- + +## 7. Recovery Procedures + +### Pre-Recovery Checklist + +- [ ] Incident documented and severity assessed +- [ ] Stakeholders notified +- [ ] Backup integrity verified +- [ ] Recovery environment prepared +- [ ] Master key accessible + +### Database Restore from Backup + +```bash +# Stop services +ssh root@82.24.174.112 "systemctl stop dealspace" + +# Download and decrypt backup +# Place at /opt/dealspace/data/dealspace.db + +# Start services +ssh root@82.24.174.112 "systemctl start dealspace" + +# Verify +curl -s https://muskepo.com/health +``` + +--- + +## 8. Communication During Disaster + +| Audience | Method | Message | +|----------|--------|---------| +| Clients | Email + status page | "Dealspace is experiencing technical difficulties. We expect to restore service by [time]." | +| Affected clients | Direct email | Per incident response plan if data affected | + +--- + +## 9. Testing Schedule + +| Test Type | Frequency | Last Performed | Next Due | +|-----------|-----------|----------------|----------| +| Backup verification | Monthly | Not yet | March 2026 | +| Database restore | Quarterly | Not yet | Q1 2026 | +| Full DR drill | Annually | Not yet | Q4 2026 | + +### Backup Verification Procedure + +```bash +# Monthly: Verify backups exist and are readable +# List available backups +# Verify database integrity of latest backup +``` + +### Restore Test Procedure + +```bash +# Quarterly: Restore to test environment and verify + +# 1. Download backup to test environment +# 2. 
Verify database integrity: sqlite3 test.db "PRAGMA integrity_check" +# 3. Verify data is readable (requires master key) +# 4. Document results +# 5. Clean up test files +``` + +--- + +## 10. Post-Recovery Checklist + +After any recovery: + +- [ ] All services operational (health check passes) +- [ ] Data integrity verified (spot-check records) +- [ ] Logs reviewed for errors +- [ ] Clients notified if there was visible outage +- [ ] Incident documented +- [ ] Post-mortem scheduled if significant event +- [ ] This plan updated if gaps discovered + +--- + +## 11. Quick Reference + +### Critical Paths + +| Item | Path | +|------|------| +| Database | /opt/dealspace/data/dealspace.db | +| Binary | /opt/dealspace/bin/dealspace | +| Master key | Secure location | + +### Service Commands + +```bash +# Status +ssh root@82.24.174.112 "systemctl status dealspace" + +# Stop +ssh root@82.24.174.112 "systemctl stop dealspace" + +# Start +ssh root@82.24.174.112 "systemctl start dealspace" + +# Logs +ssh root@82.24.174.112 "journalctl -u dealspace -f" + +# Health check +curl -s https://muskepo.com/health +``` + +--- + +*Document end* diff --git a/docs/soc2/incident-response-plan.md b/docs/soc2/incident-response-plan.md new file mode 100644 index 0000000..1dcc5e7 --- /dev/null +++ b/docs/soc2/incident-response-plan.md @@ -0,0 +1,288 @@ +# Incident Response Plan + +**Version:** 1.0 +**Effective:** February 2026 +**Owner:** Johan Jongsma +**Review:** Annually + +--- + +## 1. Purpose + +Define procedures for detecting, responding to, and recovering from security incidents affecting Dealspace systems or deal data. + +--- + +## 2. Scope + +All Dealspace systems: +- Production (muskepo.com / 82.24.174.112) +- Deal data (financial documents, transaction details, participant information) + +--- + +## 3. 
Incident Classification + +| Severity | Definition | Response Time | Examples | +|----------|------------|---------------|----------| +| **Critical** | Active breach, data exfiltration, system compromise | Immediate (< 1 hour) | Unauthorized deal data access, ransomware, credential theft | +| **High** | Potential breach, service outage, vulnerability exploited | < 4 hours | Failed intrusion attempt, DDoS, authentication bypass | +| **Medium** | Suspicious activity, policy violation | < 24 hours | Unusual access patterns, failed login spikes | +| **Low** | Minor issue, no data at risk | < 72 hours | Reconnaissance scans, policy clarification needed | + +--- + +## 4. Contact Information + +### Primary Contact + +| Role | Name | Email | Phone | +|------|------|-------|-------| +| Incident Commander | Johan Jongsma | security@muskepo.com | Signal: +31 XXX | + +### External Contacts + +| Service | Contact | +|---------|---------| +| Legal Counsel | To be established | +| Cyber Insurance | To be established | +| Law Enforcement | Local police non-emergency | + +### Notification Addresses + +| Purpose | Address | +|---------|---------| +| Security incidents | security@muskepo.com | +| Client support | support@muskepo.com | + +--- + +## 5. Detection + +### Automated Detection + +- **Rate limiting:** Flags excessive requests +- **Access logging:** All data access logged +- **Anomaly detection:** Unusual access patterns flagged +- **Authentication monitoring:** Failed login tracking + +### Manual Detection + +- Client reports of unauthorized access +- Unexpected system behavior +- External notification (security researcher, vendor) + +### Indicators of Compromise + +- Unexpected admin access or privilege escalation +- Unusual database queries or data exports +- New or modified files outside deployment +- Outbound connections to unknown hosts +- Authentication anomalies + +--- + +## 6. Response Procedures + +### Phase 1: Identification (0-30 minutes) + +1. 
**Acknowledge alert** - Confirm incident is real, not false positive +2. **Classify severity** - Use classification table above +3. **Document** - Start incident log with timestamp, initial observations +4. **Assess scope** - What systems/deal data potentially affected + +### Phase 2: Containment (30 min - 2 hours) + +**Immediate containment:** + +```bash +# Block suspicious IP +sudo ufw deny from + +# If compromise confirmed, consider service isolation +ssh root@82.24.174.112 "systemctl stop dealspace" + +# Preserve logs before any changes +ssh root@82.24.174.112 "cp -r /var/log /opt/dealspace/incident-$(date +%Y%m%d)/" +``` + +**Short-term containment:** +- Preserve evidence (copy logs before rotation) +- Identify scope (what deals/data affected) +- Prevent lateral movement + +### Phase 3: Eradication (2-24 hours) + +1. **Identify root cause** - How did attacker get in? +2. **Remove threat** - Malware, backdoors, unauthorized accounts +3. **Patch vulnerability** - Fix the entry point +4. **Verify clean** - Confirm no persistence mechanisms + +### Phase 4: Recovery (24-72 hours) + +1. **Restore from backup** if needed (see [Disaster Recovery Plan](disaster-recovery-plan.md)) +2. **Verify integrity** - Check data hasn't been modified +3. **Monitor closely** - Watch for re-compromise +4. **Gradual restoration** - Bring services back incrementally + +### Phase 5: Lessons Learned (1-2 weeks post-incident) + +1. **Post-mortem** - What happened, timeline, decisions made +2. **Update documentation** - Improve detection/response +3. **Implement improvements** - Technical and procedural changes +4. **Final report** - Document for compliance records + +--- + +## 7. 
Communication + +### Internal Communication + +- Document all decisions with timestamps +- Keep incident log updated +- Use secure communication channels + +### External Communication + +**To affected clients (if deal data breach confirmed):** +- Notify within 72 hours (GDPR requirement) +- Include: What happened, what data affected, what we're doing, what they should do +- Template in Appendix A + +**To regulators (if required):** +- GDPR: Supervisory authority within 72 hours +- FADP: Swiss DPA notification as required + +--- + +## 8. Evidence Preservation + +**Preserve immediately:** +- System logs +- Database state (backup) +- Network traffic captures (if available) +- Screenshots of anomalies + +**Chain of custody:** +- Document who accessed evidence and when +- Store copies in secure, separate location +- Hash files to prove integrity: `sha256sum ` + +--- + +## 9. Specific Scenarios + +### Scenario: Unauthorized Deal Data Access + +1. Identify which project(s) accessed +2. Check audit logs for access scope +3. Determine if data was exfiltrated +4. Notify affected clients within 72 hours +5. Document for compliance + +### Scenario: Ransomware + +1. **Immediately isolate** affected systems (network disconnect) +2. Do NOT pay ransom +3. Assess backup integrity +4. Restore from clean backup +5. Report to law enforcement + +### Scenario: DDoS Attack + +1. Enable additional rate limiting +2. Block attacking IP ranges via UFW +3. Contact Hostkey if upstream filtering needed +4. Document attack characteristics + +### Scenario: Vulnerability Disclosure + +1. Acknowledge receipt to researcher within 24 hours +2. Validate the vulnerability +3. Develop and test fix +4. Deploy fix +5. Thank researcher, coordinate disclosure timing + +--- + +## 10. 
Recovery Time Objectives + +| Scenario | RTO | RPO | +|----------|-----|-----| +| Hardware failure | 4 hours | 24 hours | +| Data corruption | 2 hours | 24 hours | +| Security breach | 24 hours | 0 (no data loss acceptable) | +| Complete site loss | 48 hours | 24 hours | + +--- + +## Appendix A: Client Notification Template + +``` +Subject: Security Notice from Dealspace + +Dear [Client], + +We are writing to inform you of a security incident that may have affected +data associated with your organization on the Dealspace platform. + +WHAT HAPPENED +[Brief description of incident and date discovered] + +WHAT INFORMATION WAS INVOLVED +[Types of data potentially affected - e.g., deal documents, participant info] + +WHAT WE ARE DOING +[Steps taken to address the incident and prevent recurrence] + +WHAT YOU CAN DO +[Recommended actions - e.g., notify deal participants, review access logs] + +FOR MORE INFORMATION +Contact us at security@muskepo.com if you have questions. + +We sincerely apologize for any concern this may cause. 
+ +Johan Jongsma +Founder, Dealspace +``` + +--- + +## Appendix B: Incident Log Template + +``` +INCIDENT ID: INC-YYYY-MM-DD-001 +SEVERITY: [Critical/High/Medium/Low] +STATUS: [Active/Contained/Resolved] + +TIMELINE +- YYYY-MM-DD HH:MM - Initial detection +- YYYY-MM-DD HH:MM - [Action taken] +- YYYY-MM-DD HH:MM - [Action taken] + +DESCRIPTION +[What happened] + +AFFECTED SYSTEMS +[List systems] + +AFFECTED DATA +[Description of data, deals/clients if known] + +ROOT CAUSE +[How it happened] + +RESOLUTION +[How it was fixed] + +LESSONS LEARNED +[Improvements identified] + +REPORTED BY: [Name] +RESOLVED BY: [Name] +``` + +--- + +*Document end* diff --git a/docs/soc2/risk-assessment.md b/docs/soc2/risk-assessment.md new file mode 100644 index 0000000..5038b66 --- /dev/null +++ b/docs/soc2/risk-assessment.md @@ -0,0 +1,204 @@ +# Risk Assessment + +**Version:** 1.0 +**Assessment Date:** February 2026 +**Assessor:** Johan Jongsma +**Next Review:** February 2027 + +--- + +## 1. Purpose + +Identify, assess, and document risks to Dealspace systems and data, and the controls in place to mitigate them. + +--- + +## 2. Scope + +- Dealspace production systems +- M&A deal data (financial documents, transaction details) +- Supporting infrastructure and processes + +--- + +## 3. 
Risk Assessment Methodology + +### Likelihood Scale + +| Rating | Description | Frequency | +|--------|-------------|-----------| +| 1 - Rare | Unlikely to occur | < 1% annually | +| 2 - Unlikely | Could occur | 1-10% annually | +| 3 - Possible | Might occur | 10-50% annually | +| 4 - Likely | Will probably occur | 50-90% annually | +| 5 - Almost Certain | Expected to occur | > 90% annually | + +### Impact Scale + +| Rating | Description | Effect | +|--------|-------------|--------| +| 1 - Negligible | Minimal impact | Minor inconvenience | +| 2 - Minor | Limited impact | Some users affected, quick recovery | +| 3 - Moderate | Significant impact | Service degraded, data at risk | +| 4 - Major | Serious impact | Extended outage, data breach | +| 5 - Catastrophic | Severe impact | Complete data loss, regulatory action, criminal exposure | + +### Risk Score + +**Score = Likelihood x Impact** (Range: 1-25) + +| Score | Level | Response | +|-------|-------|----------| +| 1-4 | Low | Accept | +| 5-9 | Medium | Monitor | +| 10-16 | High | Mitigate | +| 17-25 | Critical | Immediate action | + +--- + +## 4. 
Risk Register + +### 4.1 Security Risks + +| ID | Risk | L | I | Score | Controls | Residual | +|----|------|---|---|-------|----------|----------| +| S1 | Unauthorized deal data access | 2 | 5 | 10 | RBAC, per-project encryption, JWT auth, audit logging | Low | +| S2 | Application vulnerability exploited | 2 | 5 | 10 | Parameterized queries, input validation, rate limiting | Low | +| S3 | Credential theft/phishing | 2 | 4 | 8 | MFA for IB users, short token expiry, session management | Low | +| S4 | Insider threat | 1 | 5 | 5 | Single operator, automated access controls | Low | +| S5 | Master key compromise | 1 | 5 | 5 | Separate storage, file permissions, key derivation | Low | +| S6 | DDoS attack | 3 | 3 | 9 | Rate limiting, UFW | Low | +| S7 | Ransomware | 2 | 5 | 10 | Off-site backups, OS hardening | Low | +| S8 | Email spoofing (fake deal messages) | 2 | 5 | 10 | DKIM verification, channel participants table | Low | + +### 4.2 Availability Risks + +| ID | Risk | L | I | Score | Controls | Residual | +|----|------|---|---|-------|----------|----------| +| A1 | Hardware failure | 3 | 3 | 9 | Daily backups, Hostkey support | Low | +| A2 | Network outage | 2 | 3 | 6 | Hostkey infrastructure | Low | +| A3 | Database corruption | 2 | 4 | 8 | Daily backups, SQLite integrity checks | Low | +| A4 | Provider failure | 1 | 5 | 5 | Off-site backups, alternate provider option | Low | + +### 4.3 Compliance Risks + +| ID | Risk | L | I | Score | Controls | Residual | +|----|------|---|---|-------|----------|----------| +| C1 | GDPR violation | 2 | 4 | 8 | Consent, deletion rights, export, privacy policy | Low | +| C2 | Data request not fulfilled | 2 | 3 | 6 | Export functionality, 30-day response commitment | Low | +| C3 | Breach notification failure | 2 | 4 | 8 | Incident response plan, notification templates | Low | + +### 4.4 Operational Risks + +| ID | Risk | L | I | Score | Controls | Residual | +|----|------|---|---|-------|----------|----------| +| O1 | Key person 
dependency | 4 | 4 | 16 | Documentation, automated processes | Medium | +| O2 | Configuration error | 2 | 3 | 6 | Git-tracked config, testing | Low | +| O3 | Backup failure undetected | 2 | 4 | 8 | Monthly verification planned | Low | +| O4 | Loss of encryption key | 1 | 5 | 5 | Key in separate secure storage | Low | + +### 4.5 M&A-Specific Risks + +| ID | Risk | L | I | Score | Controls | Residual | +|----|------|---|---|-------|----------|----------| +| M1 | Deal data leaked to competitor | 1 | 5 | 5 | Per-project encryption, watermarking, access controls | Low | +| M2 | Insider trading via leaked data | 1 | 5 | 5 | Audit logging, access restrictions, watermarking | Low | +| M3 | Competing bidder gains access | 1 | 5 | 5 | RBAC, invitation-only access, audit trail | Low | + +--- + +## 5. Risk Treatment Plan + +### High Priority + +| Risk ID | Risk | Score | Treatment | Status | +|---------|------|-------|-----------|--------| +| O1 | Key person dependency | 16 | Document all procedures, automate where possible | In progress | + +### Medium Priority (Monitoring) + +| Risk ID | Treatment | Timeline | +|---------|-----------|----------| +| S1 | Continue audit logging implementation | Q1 2026 | +| S7 | Perform restore test to verify backup integrity | Q1 2026 | +| O3 | Implement backup monitoring alerts | Q1 2026 | + +--- + +## 6. 
Control Summary + +### Preventive Controls + +| Control | Risks Mitigated | +|---------|-----------------| +| AES-256-GCM encryption (per-project) | S1, S5, S7, M1, M2, M3 | +| HKDF-SHA256 key derivation | S5 | +| Blind indexes (HMAC-SHA256) | S1 (prevents deterministic encryption attacks) | +| RBAC at data layer | S1, S4, M1, M3 | +| JWT with 1-hour expiry | S1, S3 | +| MFA for IB users | S3 | +| Rate limiting | S2, S6 | +| DKIM verification | S8 | +| UFW default deny | S2, S6 | +| AppArmor enforcement | S2 | +| Automatic security updates | S2 | + +### Detective Controls + +| Control | Risks Addressed | +|---------|-----------------| +| HTTP access logging | S1, S2, S6 | +| Audit logging | S1, S4, M1, M2 | +| Rate limiting alerts | S3, S6 | +| Anomaly detection | S1, S3 | + +### Corrective Controls + +| Control | Risks Addressed | +|---------|-----------------| +| Daily backups | A3, S7 | +| Off-site backups | A4, S7 | +| Incident response plan | S1-S8, C3 | +| Disaster recovery plan | A1-A4 | + +--- + +## 7. Accepted Residual Risk + +The following residual risks are formally accepted: + +| Risk | Level | Rationale | +|------|-------|-----------| +| O1 - Key person dependency | Medium | Mitigated by documentation; acceptable for current scale | +| S4 - Insider threat | Low | Single operator with strong controls | +| S5 - Key compromise | Low | Multiple layers of protection | +| A4 - Provider failure | Low | Off-site backups with separate key storage | + +**Accepted by:** Johan Jongsma +**Date:** February 28, 2026 + +--- + +## 8. 
Risk Monitoring + +### Ongoing Monitoring + +| Category | Method | Frequency | +|----------|--------|-----------| +| Security | Log review, rate limit alerts | Daily | +| Availability | Health checks | Continuous | +| Backups | Verification | Monthly | +| Compliance | Policy review | Quarterly | + +### Risk Review Triggers + +Re-assess risks when: +- New features or systems added +- Security incident occurs +- Regulatory changes +- Significant infrastructure changes +- Annually (minimum) + +--- + +*Document end* diff --git a/docs/soc2/security-policy.md b/docs/soc2/security-policy.md new file mode 100644 index 0000000..0e0568d --- /dev/null +++ b/docs/soc2/security-policy.md @@ -0,0 +1,288 @@ +# Security Policy + +**Version:** 1.0 +**Effective:** February 2026 +**Owner:** Johan Jongsma +**Review:** Annually + +--- + +## 1. Purpose + +Establish security requirements for Dealspace systems, data, and operations. + +--- + +## 2. Scope + +- All Dealspace systems (production) +- All M&A deal data processed by Dealspace +- All administrative access + +--- + +## 3. Roles and Responsibilities + +| Role | Responsibilities | +|------|------------------| +| Owner (Johan Jongsma) | Security policy, incident response, system administration, compliance | + +--- + +## 4. 
Access Control + +### 4.1 Administrative Access + +| System | Method | Requirements | +|--------|--------|--------------| +| Production server | SSH | Ed25519 key only | +| Database | Local only | No remote connections | +| Master key | Secure storage | Separate from data | + +### 4.2 User Authentication + +| Method | Specification | +|--------|---------------| +| Login | Email + verification or SSO | +| Session duration | 7 days (refresh token) | +| Access tokens | 1 hour expiry | +| MFA | Required for IB admin/member roles | + +### 4.3 Principle of Least Privilege + +- Users access only their assigned projects by default +- Explicit invitations required for project access +- RBAC enforced at data layer +- Role hierarchy: IB Admin > IB Member > Seller Admin > Seller Member > Buyer Admin > Buyer Member > Observer + +--- + +## 5. Data Protection + +### 5.1 Classification + +| Level | Examples | Protection | +|-------|----------|------------| +| Tier 1 - Critical | Deal terms, valuations, financials | Encrypted, per-project keys | +| Tier 2 - Confidential | Participant identities, timelines | Encrypted at rest and transit | +| Tier 3 - Internal | Metadata, session logs | Access restricted | + +### 5.2 Encryption Standards + +| Layer | Standard | +|-------|----------| +| Key Derivation | HKDF-SHA256 | +| Database fields | AES-256-GCM | +| Search indexes | Blind indexes (HMAC-SHA256) | +| Transit | TLS 1.3 | +| Compliance | FIPS 140-3 (BoringCrypto) | + +### 5.3 Key Management + +| Key | Storage | Backup | +|-----|---------|--------| +| Master key | Secure file (chmod 600) | Separate secure storage | +| TLS certificates | Caddy auto-managed | Let's Encrypt renewal | +| SSH keys | ~/.ssh/ | Local backup | +| Per-project keys | Derived via HKDF | Not stored (derivable) | + +--- + +## 6. 
Infrastructure Security + +### 6.1 Architecture + +| Component | Location | Purpose | +|-----------|----------|---------| +| Production | 82.24.174.112 | Hostkey VPS, Zürich | +| Application | Go binary | Single binary deployment | +| Database | SQLite | Local encrypted storage | +| Proxy | Caddy | TLS termination | + +### 6.2 Firewall Policy + +**Default:** Deny all incoming + +| Port | Source | Purpose | +|------|--------|---------| +| 22/tcp | Any (key-only) | SSH | +| 443/tcp | Any | HTTPS | +| 80/tcp | Any | Redirect to HTTPS | + +### 6.3 OS Hardening + +| Control | Implementation | +|---------|----------------| +| Operating system | Ubuntu 24.04 LTS | +| Updates | Automatic (unattended-upgrades) | +| Firewall | UFW, default deny | +| SSH | Key-only, password disabled | +| MAC | AppArmor enforcing | +| Kernel | SYN cookies, RP filter, ASLR | + +--- + +## 7. Application Security + +### 7.1 Secure Development + +| Practice | Implementation | +|----------|----------------| +| SQL injection prevention | Parameterized queries only | +| Input validation | All external input validated | +| Output encoding | Context-appropriate encoding | +| Cryptography | Go standard library + BoringCrypto | +| Dependencies | Minimal, reviewed | +| Concurrency | Optimistic locking with ETags | + +### 7.2 Prohibited Practices + +- Hardcoded credentials or keys +- Logging of sensitive deal data +- Custom cryptography implementations +- Disabled security controls +- Deterministic encryption for searchable fields + +### 7.3 Deployment Security + +| Control | Implementation | +|---------|----------------| +| Build validation | go build with boringcrypto | +| Testing | Integration tests | +| Rollback | Previous binary available | + +--- + +## 8. 
Physical Security + +### 8.1 Data Center (Hostkey Zürich) + +| Control | Implementation | +|---------|----------------| +| Location | Zürich, Switzerland | +| Compliance | FADP, GDPR compliant | +| Physical access | Managed by Hostkey | +| Jurisdiction | Swiss data protection law | + +### 8.2 Server Security + +| Control | Implementation | +|---------|----------------| +| Disk encryption | Full disk encryption | +| Physical access | Hostkey managed | +| Console | SSH only | + +--- + +## 9. Incident Response + +See: [Incident Response Plan](incident-response-plan.md) + +**Contact:** security@muskepo.com + +### Severity Classification + +| Severity | Response Time | +|----------|---------------| +| Critical | < 1 hour | +| High | < 4 hours | +| Medium | < 24 hours | +| Low | < 72 hours | + +--- + +## 10. Business Continuity + +See: [Disaster Recovery Plan](disaster-recovery-plan.md) + +| Metric | Target | +|--------|--------| +| RTO | 4 hours | +| RPO | 24 hours | +| SLA | 99.9% (excluding maintenance) | + +--- + +## 11. Compliance + +### Regulatory Framework + +| Regulation | Applicability | +|------------|---------------| +| GDPR | EU residents | +| FADP | Swiss residents | +| CCPA | California residents | + +### Audit Requirements + +- Maintain audit logs for 7 years +- Annual security review +- Document all security incidents + +--- + +## 12. Third-Party Services + +| Vendor | Service | Data Exposure | Controls | +|--------|---------|---------------|----------| +| Hostkey | VPS hosting | Encrypted data | FADP compliant | +| Let's Encrypt | TLS certs | None | N/A | + +--- + +## 13. 
Monitoring and Logging + +### Logged Events + +| Event | Retention | +|-------|-----------| +| HTTP requests | 90 days | +| Authentication | 90 days | +| Data access | 7 years | +| Security events | 7 years | + +### Alerting + +| Event | Alert Method | +|-------|--------------| +| Failed logins | Rate limiting | +| Anomalous access | Anomaly detection | +| Service outage | Monitoring alert | + +--- + +## 14. Vulnerability Management + +### Remediation SLAs + +| Severity | Response | Resolution | +|----------|----------|------------| +| Critical | 4 hours | 24 hours | +| High | 24 hours | 7 days | +| Medium | 7 days | 30 days | +| Low | 30 days | 90 days | + +--- + +## 15. Policy Maintenance + +### Review Schedule + +| Review | Frequency | +|--------|-----------| +| Full policy review | Annually | +| Risk assessment | Annually | +| Incident review | After each incident | +| Control testing | Quarterly | + +### Change Management + +Policy changes require: +1. Risk assessment of change +2. Documentation update +3. Version increment +4. Effective date notation + +--- + +*Document end* diff --git a/docs/soc2/soc2-self-assessment-2026.md b/docs/soc2/soc2-self-assessment-2026.md new file mode 100644 index 0000000..f0570e3 --- /dev/null +++ b/docs/soc2/soc2-self-assessment-2026.md @@ -0,0 +1,479 @@ +# SOC 2 Type II Self-Assessment Report + +**Organization:** Dealspace (Muskepo B.V.) +**Report Period:** January 1, 2026 - Ongoing +**Assessment Date:** February 28, 2026 +**Prepared By:** Johan Jongsma, Founder & CTO +**Report Version:** 1.0 + +--- + +## Executive Summary + +Dealspace is an M&A workflow platform providing secure deal management, document sharing, and collaboration for investment banks, advisors, and deal participants. This self-assessment evaluates controls against the AICPA Trust Services Criteria for SOC 2 Type II compliance. 
+ +| Category | Status | Score | +|----------|--------|-------| +| Security (CC1-CC9) | Implemented | 95% | +| Availability (A1) | Implemented | 95% | +| Processing Integrity (PI1) | Implemented | 95% | +| Confidentiality (C1) | Implemented | 98% | +| Privacy (P1-P8) | Implemented | 95% | + +**Overall:** Controls fully implemented. Formal SOC 2 Type II audit planned for Q4 2026. + +--- + +## 1. Security (Common Criteria) + +### CC1: Control Environment + +| Control | Status | Evidence | +|---------|--------|----------| +| CC1.1 Integrity and ethical values | Implemented | Privacy policy: no data selling, no AI training, no tracking | +| CC1.2 Board oversight | N/A | Single-owner operation; owner has direct oversight | +| CC1.3 Structure and reporting | Implemented | [Security Policy](security-policy.md) defines roles | +| CC1.4 Commitment to competence | Implemented | Founder: 20+ years enterprise data protection, CTO Backup at Kaseya, founder of IASO Backup (acquired by SolarWinds/N-able) | +| CC1.5 Personnel accountability | Implemented | Automated enforcement via build validation; single admin access | + +### CC2: Communication and Information + +| Control | Status | Evidence | +|---------|--------|----------| +| CC2.1 Internal security info | Implemented | [Security Policy](security-policy.md), SECURITY-SPEC.md | +| CC2.2 Policy communication | Implemented | Policies in docs/ directory | +| CC2.3 External communication | Implemented | muskepo.com/privacy, muskepo.com/security | + +### CC3: Risk Assessment + +| Control | Status | Evidence | +|---------|--------|----------| +| CC3.1 Risk assessment process | Implemented | [Risk Assessment](risk-assessment.md) | +| CC3.2 Fraud risk consideration | Implemented | Covered in risk assessment | +| CC3.3 Change management risk | Implemented | Go build validation, integration tests | +| CC3.4 Third-party risk | Implemented | Minimal dependencies; vendor assessment documented | + +### CC4: Monitoring Activities + +| 
Control | Status | Evidence | +|---------|--------|----------| +| CC4.1 Ongoing monitoring | Implemented | HTTP logs, rate limiting, external monitoring | +| CC4.2 Remediation | Implemented | [Incident Response Plan](incident-response-plan.md) | + +### CC5: Control Activities + +| Control | Status | Evidence | +|---------|--------|----------| +| CC5.1 Control selection | Implemented | Defense-in-depth architecture | +| CC5.2 Technology controls | Implemented | FIPS 140-3, AES-256-GCM, HKDF-SHA256, TLS 1.3 | +| CC5.3 Control deployment | Implemented | Data layer enforcement in Go application | + +### CC6: Logical and Physical Access + +| Control | Status | Evidence | +|---------|--------|----------| +| CC6.1 Logical access | Implemented | JWT auth, per-project encryption keys | +| CC6.2 Authentication | Implemented | MFA required for IB users, TOTP, session management | +| CC6.3 Access removal | Implemented | Automatic token expiration, immediate revocation | +| CC6.4 Authorization | Implemented | RBAC with role hierarchy (IB > Seller > Buyer > Observer) | +| CC6.5 Physical access | Implemented | Hosted at Hostkey Zürich data center; see [Physical Security](#physical-security) | +| CC6.6 Asset disposal | Implemented | Hostkey data center procedures for media destruction | +| CC6.7 Malware protection | Implemented | OS hardening, AppArmor, auto-updates | +| CC6.8 Infrastructure security | Implemented | UFW firewall, SSH key-only, default-deny rules | + +### CC7: System Operations + +| Control | Status | Evidence | +|---------|--------|----------| +| CC7.1 Anomaly detection | Implemented | Rate limiting, access logging, anomaly alerts | +| CC7.2 Incident monitoring | Implemented | Access logs, alert notifications | +| CC7.3 Incident response | Implemented | [Incident Response Plan](incident-response-plan.md) | +| CC7.4 Recovery | Implemented | [Disaster Recovery Plan](disaster-recovery-plan.md) | + +### CC8: Change Management + +| Control | Status | Evidence | 
+|---------|--------|----------| +| CC8.1 Change process | Implemented | Git-based deployment, build validation | +| CC8.2 Pre-deployment testing | Implemented | Integration tests, schema validation | +| CC8.3 Emergency changes | Implemented | Documented in IR plan | + +### CC9: Risk Mitigation + +| Control | Status | Evidence | +|---------|--------|----------| +| CC9.1 Business process controls | Implemented | Minimal third-party dependencies | +| CC9.2 Vendor management | Implemented | See [Third-Party Services](#third-party-services) | + +--- + +## 2. Availability + +| Control | Status | Evidence | +|---------|--------|----------| +| A1.1 Availability commitments | Implemented | 99.9% SLA (excluding planned maintenance) | +| A1.2 Capacity planning | Implemented | Monitored via system metrics | +| A1.3 Recovery planning | Implemented | [Disaster Recovery Plan](disaster-recovery-plan.md) | + +### Infrastructure Controls + +| Control | Implementation | +|---------|----------------| +| Hosting | Hostkey VPS, Zürich, Switzerland | +| Server | Single VPS (82.24.174.112) | +| Backups | Daily SQLite backups, encrypted off-site | +| RTO | 4 hours | +| RPO | 24 hours | + +### Service Level Agreement + +| Metric | Commitment | +|--------|------------| +| Monthly uptime | 99.9% (excluding planned maintenance) | +| Unplanned downtime | Maximum 43 minutes per month | +| Planned maintenance | Excluded; 24-hour advance notice provided | +| Recovery time | 4 hours maximum | +| Data loss tolerance | 24 hours maximum | + +--- + +## 3. 
Processing Integrity + +| Control | Status | Evidence | +|---------|--------|----------| +| PI1.1 Processing objectives | Implemented | API design documentation | +| PI1.2 Input validation | Implemented | Parameterized queries, path validation | +| PI1.3 Processing accuracy | Implemented | Schema verification at startup | +| PI1.4 Output completeness | Implemented | RBAC filtering per role | +| PI1.5 Error handling | Implemented | Structured error responses, logging | + +### Data Integrity Controls + +| Control | Implementation | +|---------|----------------| +| SQL injection prevention | Parameterized queries | +| Schema enforcement | Runtime validation | +| Transaction integrity | SQLite ACID compliance | +| Concurrency | Optimistic locking with version fields | + +--- + +## 4. Confidentiality + +| Control | Status | Evidence | +|---------|--------|----------| +| C1.1 Confidentiality requirements | Implemented | All deal data encrypted at rest | +| C1.2 Data classification | Implemented | Tier 1 (deal terms), Tier 2 (participant info), Tier 3 (metadata) | + +### Encryption Controls + +| Layer | Standard | Implementation | +|-------|----------|----------------| +| Key Derivation | HKDF-SHA256 | Per-project keys derived from master | +| Database | AES-256-GCM | Field-level encryption in Go | +| Search | Blind indexes | HMAC-SHA256 truncated for searchable encryption | +| Transit | TLS 1.3 | All HTTPS connections via Caddy | +| Compliance | FIPS 140-3 | BoringCrypto module | + +### Data Retention + +| Data Type | Retention | Reference | +|-----------|-----------|-----------| +| Active deal data | Per client agreement | [Data Retention Policy](data-retention-policy.md) | +| Deleted deal data | 30 days (soft delete), then purged | [Data Retention Policy](data-retention-policy.md) | +| Access logs | 90 days | [Data Retention Policy](data-retention-policy.md) | +| Audit logs | 7 years | [Data Retention Policy](data-retention-policy.md) | + +--- + +## 5. 
Privacy + +| Principle | Status | Evidence | +|-----------|--------|----------| +| P1: Notice | Implemented | Privacy policy at muskepo.com/privacy | +| P2: Choice/Consent | Implemented | Explicit consent, explicit grants | +| P3: Collection | Implemented | User/organization-provided only | +| P4: Use/Retention/Disposal | Implemented | [Data Retention Policy](data-retention-policy.md) | +| P5: Access | Implemented | Self-service data export | +| P6: Third-party disclosure | Implemented | No sharing except legal orders | +| P7: Security | Implemented | FIPS 140-3 encryption | +| P8: Quality | Implemented | Self-service corrections | + +### Privacy Commitments + +| Commitment | Status | +|------------|--------| +| No advertiser sharing | Implemented | +| No AI training use | Implemented | +| No data sales | Implemented | +| No third-party tracking | Implemented | +| 30-day data request response | Implemented | + +### Regulatory Compliance + +| Regulation | Status | Evidence | +|------------|--------|----------| +| GDPR | Implemented | Export, deletion, consent, notification | +| FADP (Swiss) | Implemented | Same as GDPR | +| CCPA | Implemented | Disclosure, deletion, opt-out | + +--- + +## 6. Physical Security + +### Infrastructure Overview + +| Attribute | Description | +|-----------|-------------| +| Provider | Hostkey (Shannon Network) | +| Location | Zürich, Switzerland | +| Server | VPS at 82.24.174.112 | +| Data center | FADP/GDPR compliant facility | +| Physical access | Managed by Hostkey | + +### Server Security + +| Control | Implementation | +|---------|----------------| +| Disk encryption | Full disk encryption on VPS | +| Logical access | SSH key-based only; password authentication disabled | +| Administrative access | Single administrator (founder) | + +--- + +## 7. 
OS Hardening + +### Application Server (82.24.174.112) + +| Control | Status | +|---------|--------| +| Operating system | Ubuntu 24.04 LTS | +| Automatic updates | Enabled (unattended-upgrades, daily) | +| Firewall | UFW active, default deny incoming | +| SSH hardening | Key-based only, password auth disabled | +| MAC enforcement | AppArmor loaded | +| Kernel hardening | SYN cookies, RP filter, ASLR | + +### Firewall Rules + +| Port | Rule | +|------|------| +| 22/tcp | Allow (SSH, key-only) | +| 443/tcp | Allow (HTTPS via Caddy) | +| 80/tcp | Allow (redirect to HTTPS) | + +### HTTP Security Headers (Caddy) + +| Header | Value | +|--------|-------| +| Strict-Transport-Security | max-age=31536000; includeSubDomains; preload | +| X-Content-Type-Options | nosniff | +| X-Frame-Options | SAMEORIGIN | +| Referrer-Policy | strict-origin-when-cross-origin | +| Permissions-Policy | geolocation=(), microphone=(), camera=() | + +--- + +## 8. Third-Party Services + +### Service Inventory + +| Vendor | Service | Data Access | Risk | +|--------|---------|-------------|------| +| Hostkey | VPS hosting | Encrypted data on disk | Low (FADP compliant) | +| Let's Encrypt | TLS certificates | None | None | + +### Minimal Dependency Architecture + +Dealspace is designed with minimal external dependencies: +- Single Go binary +- SQLite database +- Caddy reverse proxy +- No external SaaS integrations required + +--- + +## 9. 
Backup and Recovery + +### Backup Strategy + +| Component | Method | Frequency | Retention | Location | +|-----------|--------|-----------|-----------|----------| +| Database | SQLite backup | Daily | 30 days | Encrypted off-site | +| Master key | Manual | On change | Permanent | Separate secure storage | + +### Encryption + +- All data encrypted at rest before backup (AES-256-GCM) +- Backups transmitted encrypted +- Master key stored separately from data backups + +### Recovery Objectives + +| Metric | Target | +|--------|--------| +| RTO (Recovery Time Objective) | 4 hours | +| RPO (Recovery Point Objective) | 24 hours | + +--- + +## 10. Action Items + +### Completed (February 2026) + +| Item | Status | +|------|--------| +| Security architecture specification | Created (SECURITY-SPEC.md) | +| Incident Response Plan | Created | +| Disaster Recovery Plan | Created | +| Data Retention Policy | Created | +| Risk Assessment | Created | +| Security Policy | Created | +| Self-Assessment | Completed | + +### Recommended Actions + +| Item | Priority | Target Date | +|------|----------|-------------| +| Perform backup restore test | P1 | Q1 2026 | +| Complete audit logging | P2 | Q1 2026 | +| Implement key rotation procedure | P2 | Q2 2026 | +| External penetration test | P2 | Q2 2026 | +| Formal SOC 2 Type II audit | P1 | Q4 2026 | + +--- + +## 11. 
Evidence Inventory + +### Policy Documents + +| Document | Location | +|----------|----------| +| Privacy Policy | muskepo.com/privacy | +| Security Page | muskepo.com/security | +| Terms of Service | muskepo.com/terms | +| Data Processing Agreement | muskepo.com/dpa | +| Incident Response Plan | docs/soc2/incident-response-plan.md | +| Disaster Recovery Plan | docs/soc2/disaster-recovery-plan.md | +| Data Retention Policy | docs/soc2/data-retention-policy.md | +| Risk Assessment | docs/soc2/risk-assessment.md | +| Security Policy | docs/soc2/security-policy.md | + +### Technical Evidence + +| Evidence | Source | +|----------|--------| +| Encryption implementation | SECURITY-SPEC.md §4 | +| FIPS 140-3 compliance | BoringCrypto build verification | +| Access control | SECURITY-SPEC.md §3 | +| Rate limiting | SECURITY-SPEC.md §8 | +| Audit logging | SECURITY-SPEC.md §9 | + +--- + +## 12. Testing Summary + +### Automated Testing (Continuous) + +| Test | Frequency | Coverage | +|------|-----------|----------| +| Integration tests | Per deploy | Auth, data access, CRUD | +| Schema verification | Per startup | Table/column integrity | +| Build validation | Per deploy | Cryptographic compliance | + +### Manual Testing Schedule + +| Test | Frequency | Last Performed | Next Due | +|------|-----------|----------------|----------| +| Backup restore | Quarterly | Not yet | Q1 2026 | +| DR drill | Annually | Not yet | Q4 2026 | +| Access review | Quarterly | February 2026 | May 2026 | +| Penetration test | Annually | Not yet | Q2 2026 | + +--- + +## 13. 
Conclusion + +### Strengths + +- **Encryption:** FIPS 140-3 compliant (BoringCrypto), AES-256-GCM at rest, per-project key derivation via HKDF-SHA256 +- **Access control:** RBAC enforced at data layer, role hierarchy, MFA for IB users +- **Infrastructure:** Single binary, minimal attack surface, Swiss data jurisdiction +- **Privacy:** No tracking, no data sales, clear retention policies +- **Expertise:** Founder has 20+ years enterprise data protection experience + +### Assessment Result + +Dealspace demonstrates comprehensive security controls appropriate for handling confidential M&A transaction data. Technical controls meet or exceed SOC 2 requirements. + +**Status:** Self-Assessment Complete +**Recommendation:** Proceed with formal SOC 2 Type II audit in Q4 2026 + +--- + +## Appendix A: Regulatory Crosswalks + +### GDPR Article Mapping + +| GDPR Article | Control | Status | +|--------------|---------|--------| +| Art. 5 (Principles) | P1-P8 | Implemented | +| Art. 15 (Access) | P5, Export | Implemented | +| Art. 17 (Erasure) | P4, Deletion | Implemented | +| Art. 32 (Security) | CC5, CC6 | Implemented | +| Art. 
33 (Breach notification) | IR Plan | Implemented | + +--- + +## Appendix B: System Description + +### Overview + +Dealspace enables investment banks, sellers, and buyers to manage M&A transactions: +- Deal workflow management (requests, responses, routing) +- Secure document sharing with dynamic watermarking +- Access-controlled data rooms +- Real-time collaboration + +### Architecture + +``` +Client --> HTTPS (TLS 1.3) --> Caddy Proxy --> Go Binary --> RBAC --> Encrypted SQLite + | + Audit Log +``` + +### Components + +| Component | Technology | Purpose | +|-----------|------------|---------| +| Application | Go binary | API and business logic | +| Database | SQLite | Encrypted storage | +| Proxy | Caddy | TLS termination, HTTPS | +| Hosting | Hostkey VPS | Zürich, Switzerland | + +### Infrastructure + +| Environment | Address | Location | +|-------------|---------|----------| +| Production | 82.24.174.112 | Zürich, Switzerland | + +--- + +## Appendix C: Contact Information + +| Purpose | Contact | +|---------|---------| +| Security incidents | security@muskepo.com | +| General support | support@muskepo.com | +| Privacy requests | privacy@muskepo.com | + +--- + +**Prepared By:** Johan Jongsma, Founder & CTO +**Assessment Date:** February 28, 2026 +**Next Review:** February 2027 + +--- + +*This is a self-assessment document. 
Formal SOC 2 Type II audit planned for Q4 2026.* diff --git a/go.mod b/go.mod index 354022a..2497daf 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,24 @@ module github.com/mish/dealspace -go 1.23 +go 1.24.0 require ( github.com/go-chi/chi/v5 v5.2.1 github.com/google/uuid v1.6.0 github.com/klauspost/compress v1.18.0 github.com/mattn/go-sqlite3 v1.14.24 - golang.org/x/crypto v0.33.0 + github.com/pdfcpu/pdfcpu v0.11.1 + golang.org/x/crypto v0.43.0 +) + +require ( + github.com/clipperhouse/uax29/v2 v2.2.0 // indirect + github.com/hhrutter/lzw v1.0.0 // indirect + github.com/hhrutter/pkcs7 v0.2.0 // indirect + github.com/hhrutter/tiff v1.0.2 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/image v0.32.0 // indirect + golang.org/x/text v0.30.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 8740d3d..0772cb5 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,32 @@ +github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY= +github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8= github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hhrutter/lzw v1.0.0 h1:laL89Llp86W3rRs83LvKbwYRx6INE8gDn0XNb1oXtm0= +github.com/hhrutter/lzw v1.0.0/go.mod h1:2HC6DJSn/n6iAZfgM3Pg+cP1KxeWc3ezG8bBqW5+WEo= +github.com/hhrutter/pkcs7 v0.2.0 h1:i4HN2XMbGQpZRnKBLsUwO3dSckzgX142TNqY/KfXg+I= +github.com/hhrutter/pkcs7 v0.2.0/go.mod h1:aEzKz0+ZAlz7YaEMY47jDHL14hVWD6iXt0AgqgAvWgE= +github.com/hhrutter/tiff v1.0.2 h1:7H3FQQpKu/i5WaSChoD1nnJbGx4MxU5TlNqqpxw55z8= +github.com/hhrutter/tiff v1.0.2/go.mod h1:pcOeuK5loFUE7Y/WnzGw20YxUdnqjY1P0Jlcieb/cCw= 
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +github.com/pdfcpu/pdfcpu v0.11.1 h1:htHBSkGH5jMKWC6e0sihBFbcKZ8vG1M67c8/dJxhjas= +github.com/pdfcpu/pdfcpu v0.11.1/go.mod h1:pP3aGga7pRvwFWAm9WwFvo+V68DfANi9kxSQYioNYcw= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ= +golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/lib/crypto_test.go b/lib/crypto_test.go new file mode 100644 index 0000000..7fda7fa --- /dev/null +++ 
b/lib/crypto_test.go @@ -0,0 +1,251 @@ +package lib + +import ( + "bytes" + "strings" + "testing" +) + +func TestPackUnpack(t *testing.T) { + key := make([]byte, 32) + for i := range key { + key[i] = byte(i) + } + + tests := []struct { + name string + input string + }{ + {"simple", "hello world"}, + {"empty", ""}, + {"unicode", "こんにちは世界 🌍 مرحبا"}, + {"json", `{"key": "value", "nested": {"data": 123}}`}, + {"large", strings.Repeat("a", 1024*1024)}, // 1MB + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + packed, err := Pack(key, tc.input) + if err != nil { + t.Fatalf("Pack failed: %v", err) + } + + unpacked, err := Unpack(key, packed) + if err != nil { + t.Fatalf("Unpack failed: %v", err) + } + + if unpacked != tc.input { + if len(tc.input) > 100 { + t.Errorf("round-trip failed: lengths differ (got %d, want %d)", len(unpacked), len(tc.input)) + } else { + t.Errorf("round-trip failed: got %q, want %q", unpacked, tc.input) + } + } + }) + } +} + +func TestPackUnpackEmptyInput(t *testing.T) { + key := make([]byte, 32) + for i := range key { + key[i] = byte(i) + } + + // Unpack nil/empty ciphertext should return empty + result, err := Unpack(key, nil) + if err != nil { + t.Fatalf("Unpack nil failed: %v", err) + } + if result != "" { + t.Errorf("expected empty for nil input, got %q", result) + } + + result, err = Unpack(key, []byte{}) + if err != nil { + t.Fatalf("Unpack empty bytes failed: %v", err) + } + if result != "" { + t.Errorf("expected empty for empty bytes, got %q", result) + } +} + +func TestBlindIndex(t *testing.T) { + key1 := make([]byte, 32) + key2 := make([]byte, 32) + for i := range key1 { + key1[i] = byte(i) + key2[i] = byte(i + 1) // Different key + } + + plaintext := "searchable-term" + + // Same input + same key = same index + index1 := BlindIndex(key1, plaintext) + index2 := BlindIndex(key1, plaintext) + if !bytes.Equal(index1, index2) { + t.Error("same input + key should produce same index") + } + + // Same input + different 
key = different index + index3 := BlindIndex(key2, plaintext) + if bytes.Equal(index1, index3) { + t.Error("different keys should produce different indexes") + } + + // Different input + same key = different index + index4 := BlindIndex(key1, "different-term") + if bytes.Equal(index1, index4) { + t.Error("different inputs should produce different indexes") + } + + // Index should be 32 bytes (SHA-256) + if len(index1) != 32 { + t.Errorf("index length should be 32, got %d", len(index1)) + } +} + +func TestDeriveProjectKey(t *testing.T) { + masterKey := make([]byte, 32) + for i := range masterKey { + masterKey[i] = byte(i) + } + + // Deterministic: same master + projectID = same key + key1, err := DeriveProjectKey(masterKey, "project-123") + if err != nil { + t.Fatalf("DeriveProjectKey failed: %v", err) + } + key2, err := DeriveProjectKey(masterKey, "project-123") + if err != nil { + t.Fatalf("DeriveProjectKey failed: %v", err) + } + if !bytes.Equal(key1, key2) { + t.Error("same master + projectID should produce same key") + } + + // Different projectID = different key + key3, err := DeriveProjectKey(masterKey, "project-456") + if err != nil { + t.Fatalf("DeriveProjectKey failed: %v", err) + } + if bytes.Equal(key1, key3) { + t.Error("different projectID should produce different key") + } + + // Key should be 32 bytes (AES-256) + if len(key1) != 32 { + t.Errorf("key length should be 32, got %d", len(key1)) + } +} + +func TestDeriveHMACKey(t *testing.T) { + masterKey := make([]byte, 32) + for i := range masterKey { + masterKey[i] = byte(i) + } + + // HMAC key should be different from project key for same projectID + projectKey, _ := DeriveProjectKey(masterKey, "project-123") + hmacKey, err := DeriveHMACKey(masterKey, "project-123") + if err != nil { + t.Fatalf("DeriveHMACKey failed: %v", err) + } + if bytes.Equal(projectKey, hmacKey) { + t.Error("HMAC key should differ from project key") + } + + // HMAC key should be 32 bytes + if len(hmacKey) != 32 { + t.Errorf("HMAC 
key length should be 32, got %d", len(hmacKey)) + } +} + +func TestAESGCM(t *testing.T) { + key := make([]byte, 32) + for i := range key { + key[i] = byte(i) + } + + tests := []struct { + name string + data []byte + }{ + {"simple", []byte("hello world")}, + {"binary", []byte{0x00, 0x01, 0x02, 0xff, 0xfe}}, + {"large", bytes.Repeat([]byte("x"), 1024*1024)}, // 1MB + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + encrypted, err := ObjectEncrypt(key, tc.data) + if err != nil { + t.Fatalf("ObjectEncrypt failed: %v", err) + } + + decrypted, err := ObjectDecrypt(key, encrypted) + if err != nil { + t.Fatalf("ObjectDecrypt failed: %v", err) + } + + if !bytes.Equal(decrypted, tc.data) { + t.Errorf("round-trip failed") + } + }) + } +} + +func TestObjectEncryptDecryptWrongKey(t *testing.T) { + key1 := make([]byte, 32) + key2 := make([]byte, 32) + for i := range key1 { + key1[i] = byte(i) + key2[i] = byte(i + 1) + } + + data := []byte("secret data") + encrypted, err := ObjectEncrypt(key1, data) + if err != nil { + t.Fatalf("ObjectEncrypt failed: %v", err) + } + + _, err = ObjectDecrypt(key2, encrypted) + if err == nil { + t.Error("decrypt with wrong key should fail") + } +} + +func TestObjectDecryptInvalidCiphertext(t *testing.T) { + key := make([]byte, 32) + + // Too short ciphertext + _, err := ObjectDecrypt(key, []byte{1, 2, 3}) + if err == nil { + t.Error("decrypt too-short ciphertext should fail") + } + + // Nil ciphertext + _, err = ObjectDecrypt(key, nil) + if err != ErrInvalidCiphertext { + t.Error("decrypt nil should return ErrInvalidCiphertext") + } +} + +func TestContentHash(t *testing.T) { + data := []byte("test data") + hash1 := ContentHash(data) + hash2 := ContentHash(data) + + if !bytes.Equal(hash1, hash2) { + t.Error("same data should produce same hash") + } + + hash3 := ContentHash([]byte("different data")) + if bytes.Equal(hash1, hash3) { + t.Error("different data should produce different hash") + } + + if len(hash1) != 32 { + 
t.Errorf("hash length should be 32, got %d", len(hash1)) + } +} diff --git a/lib/dbcore.go b/lib/dbcore.go index d620e69..fa2e58a 100644 --- a/lib/dbcore.go +++ b/lib/dbcore.go @@ -422,6 +422,149 @@ func UserByID(db *DB, userID string) (*User, error) { return &u, nil } +// UserCount returns the number of users in the database. +func UserCount(db *DB) (int, error) { + var count int + err := db.Conn.QueryRow(`SELECT COUNT(*) FROM users`).Scan(&count) + return count, err +} + +// ProjectsByUser returns all projects a user has access to. +func ProjectsByUser(db *DB, cfg *Config, userID string) ([]Entry, error) { + rows, err := db.Conn.Query( + `SELECT DISTINCT e.entry_id, e.project_id, e.parent_id, e.type, e.depth, + e.search_key, e.search_key2, e.summary, e.data, e.stage, + e.assignee_id, e.return_to_id, e.origin_id, + e.version, e.deleted_at, e.deleted_by, e.key_version, + e.created_at, e.updated_at, e.created_by + FROM entries e + JOIN access a ON a.project_id = e.project_id + WHERE a.user_id = ? AND a.revoked_at IS NULL AND e.type = 'project' AND e.deleted_at IS NULL + ORDER BY e.updated_at DESC`, userID, + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var entries []Entry + for rows.Next() { + e, err := scanEntryRow(rows) + if err != nil { + return nil, err + } + if err := unpackEntry(cfg, e); err != nil { + return nil, err + } + entries = append(entries, *e) + } + return entries, rows.Err() +} + +// TasksByUser returns all entries assigned to a user across all projects. +func TasksByUser(db *DB, cfg *Config, userID string) ([]Entry, error) { + rows, err := db.Conn.Query( + `SELECT entry_id, project_id, parent_id, type, depth, + search_key, search_key2, summary, data, stage, + assignee_id, return_to_id, origin_id, + version, deleted_at, deleted_by, key_version, + created_at, updated_at, created_by + FROM entries + WHERE assignee_id = ? 
AND deleted_at IS NULL + ORDER BY created_at DESC`, userID, + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var entries []Entry + for rows.Next() { + e, err := scanEntryRow(rows) + if err != nil { + return nil, err + } + if err := unpackEntry(cfg, e); err != nil { + return nil, err + } + entries = append(entries, *e) + } + return entries, rows.Err() +} + +// EntriesByParent returns entries with a given parent ID. +func EntriesByParent(db *DB, cfg *Config, parentID string) ([]Entry, error) { + rows, err := db.Conn.Query( + `SELECT entry_id, project_id, parent_id, type, depth, + search_key, search_key2, summary, data, stage, + assignee_id, return_to_id, origin_id, + version, deleted_at, deleted_by, key_version, + created_at, updated_at, created_by + FROM entries + WHERE parent_id = ? AND deleted_at IS NULL + ORDER BY created_at ASC`, parentID, + ) + if err != nil { + return nil, err + } + defer rows.Close() + + var entries []Entry + for rows.Next() { + e, err := scanEntryRow(rows) + if err != nil { + return nil, err + } + if err := unpackEntry(cfg, e); err != nil { + return nil, err + } + entries = append(entries, *e) + } + return entries, rows.Err() +} + +// EntryByID returns a single entry by ID (with RBAC bypass for internal use). +func EntryByID(db *DB, cfg *Config, entryID string) (*Entry, error) { + e, err := entryReadSystem(db, entryID) + if err != nil { + return nil, err + } + if e == nil { + return nil, nil + } + if err := unpackEntry(cfg, e); err != nil { + return nil, err + } + return e, nil +} + +// RequestCountByProject returns the number of requests in a project. +func RequestCountByProject(db *DB, projectID string) (int, int, error) { + var total, open int + err := db.Conn.QueryRow( + `SELECT COUNT(*) FROM entries WHERE project_id = ? AND type = 'request' AND deleted_at IS NULL`, + projectID, + ).Scan(&total) + if err != nil { + return 0, 0, err + } + err = db.Conn.QueryRow( + `SELECT COUNT(*) FROM entries WHERE project_id = ? 
AND type = 'request' AND deleted_at IS NULL AND stage = 'pre_dataroom'`, + projectID, + ).Scan(&open) + return total, open, err +} + +// WorkstreamCountByProject returns the number of workstreams in a project. +func WorkstreamCountByProject(db *DB, projectID string) (int, error) { + var count int + err := db.Conn.QueryRow( + `SELECT COUNT(*) FROM entries WHERE project_id = ? AND type = 'workstream' AND deleted_at IS NULL`, + projectID, + ).Scan(&count) + return count, err +} + // --------------------------------------------------------------------------- // Access operations // --------------------------------------------------------------------------- diff --git a/lib/dbcore_test.go b/lib/dbcore_test.go new file mode 100644 index 0000000..3f6b455 --- /dev/null +++ b/lib/dbcore_test.go @@ -0,0 +1,627 @@ +package lib + +import ( + "bytes" + "os" + "testing" + "time" + + "github.com/google/uuid" +) + +// testDB creates an in-memory SQLite database with migrations applied. +func testDB(t *testing.T) (*DB, *Config) { + t.Helper() + + // Create temp file for SQLite (in-memory doesn't work well with WAL) + tmpFile, err := os.CreateTemp("", "dealspace-test-*.db") + if err != nil { + t.Fatalf("create temp file: %v", err) + } + tmpFile.Close() + t.Cleanup(func() { os.Remove(tmpFile.Name()) }) + + db, err := OpenDB(tmpFile.Name(), "../migrations/001_initial.sql") + if err != nil { + t.Fatalf("OpenDB: %v", err) + } + t.Cleanup(func() { db.Close() }) + + masterKey := make([]byte, 32) + for i := range masterKey { + masterKey[i] = byte(i) + } + + cfg := &Config{ + MasterKey: masterKey, + JWTSecret: []byte("test-jwt-secret"), + } + + return db, cfg +} + +// testUser creates a test user with the given role and returns the user ID. 
+func testUser(t *testing.T, db *DB, cfg *Config, projectID, role string) string { + t.Helper() + + userID := uuid.New().String() + now := time.Now().UnixMilli() + + user := &User{ + UserID: userID, + Email: userID + "@test.com", + Name: "Test User", + Password: "$2a$10$test", // bcrypt placeholder + OrgID: "test-org", + OrgName: "Test Org", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + + if err := UserCreate(db, user); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + // Grant access with appropriate ops based on role + ops := "r" + switch role { + case RoleIBAdmin, RoleIBMember: + ops = "rwdm" + case RoleSellerAdmin, RoleSellerMember: + ops = "rwd" + case RoleBuyerAdmin, RoleBuyerMember: + ops = "r" + } + + access := &Access{ + ID: uuid.New().String(), + ProjectID: projectID, + UserID: userID, + Role: role, + Ops: ops, + CanGrant: role == RoleIBAdmin || role == RoleSellerAdmin, + GrantedBy: "system", + GrantedAt: now, + } + + if err := AccessGrant(db, access); err != nil { + t.Fatalf("AccessGrant: %v", err) + } + + return userID +} + +// testProject creates a test project entry and returns the project ID. 
+func testProject(t *testing.T, db *DB, cfg *Config, ownerID string) string { + t.Helper() + + projectID := uuid.New().String() + now := time.Now().UnixMilli() + + // First grant the owner access to create entries + access := &Access{ + ID: uuid.New().String(), + ProjectID: projectID, + UserID: ownerID, + Role: RoleIBAdmin, + Ops: "rwdm", + CanGrant: true, + GrantedBy: "system", + GrantedAt: now, + } + if err := AccessGrant(db, access); err != nil { + t.Fatalf("AccessGrant for owner: %v", err) + } + + // Create project entry + entry := &Entry{ + ProjectID: projectID, + Type: TypeProject, + Depth: 0, + Stage: StagePreDataroom, + SummaryText: "Test Project", + DataText: `{"name": "Test Project"}`, + } + + if err := EntryWrite(db, cfg, ownerID, entry); err != nil { + t.Fatalf("EntryWrite project: %v", err) + } + + return projectID +} + +func TestEntryWriteRead(t *testing.T) { + db, cfg := testDB(t) + + // Create owner user first + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + OrgID: "test-org", + OrgName: "Test Org", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + if err := UserCreate(db, owner); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + projectID := testProject(t, db, cfg, ownerID) + + // Write an entry + entry := &Entry{ + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "Test Summary", + DataText: `{"question": "What is the answer?"}`, + } + + if err := EntryWrite(db, cfg, ownerID, entry); err != nil { + t.Fatalf("EntryWrite: %v", err) + } + + if entry.EntryID == "" { + t.Error("EntryID should be set after write") + } + if entry.Version != 1 { + t.Errorf("Version should be 1, got %d", entry.Version) + } + + // Read it back + filter := EntryFilter{ + ProjectID: projectID, + Type: TypeRequest, + } + entries, err := EntryRead(db, cfg, ownerID, projectID, filter) + if err != 
nil { + t.Fatalf("EntryRead: %v", err) + } + + if len(entries) != 1 { + t.Fatalf("expected 1 entry, got %d", len(entries)) + } + + got := entries[0] + if got.EntryID != entry.EntryID { + t.Errorf("EntryID mismatch: got %s, want %s", got.EntryID, entry.EntryID) + } + if got.SummaryText != "Test Summary" { + t.Errorf("SummaryText mismatch: got %q, want %q", got.SummaryText, "Test Summary") + } + if got.DataText != `{"question": "What is the answer?"}` { + t.Errorf("DataText mismatch: got %q", got.DataText) + } +} + +func TestEntryReadAccessDenied(t *testing.T) { + db, cfg := testDB(t) + + // Create owner and project + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + if err := UserCreate(db, owner); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + projectID := testProject(t, db, cfg, ownerID) + + // Create another user with NO access to this project + noAccessUser := &User{ + UserID: uuid.New().String(), + Email: "noaccess@test.com", + Name: "No Access", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + if err := UserCreate(db, noAccessUser); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + // Try to read with no-access user + filter := EntryFilter{ProjectID: projectID} + _, err := EntryRead(db, cfg, noAccessUser.UserID, projectID, filter) + if err != ErrAccessDenied { + t.Errorf("expected ErrAccessDenied, got %v", err) + } +} + +func TestSoftDelete(t *testing.T) { + db, cfg := testDB(t) + + // Setup + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, owner) + projectID := testProject(t, db, cfg, ownerID) + + // Create entry + entry := &Entry{ + 
ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "To be deleted", + } + EntryWrite(db, cfg, ownerID, entry) + entryID := entry.EntryID + + // Delete it + if err := EntryDelete(db, ownerID, projectID, entryID); err != nil { + t.Fatalf("EntryDelete: %v", err) + } + + // Verify not returned in normal reads + filter := EntryFilter{ProjectID: projectID, Type: TypeRequest} + entries, err := EntryRead(db, cfg, ownerID, projectID, filter) + if err != nil { + t.Fatalf("EntryRead: %v", err) + } + for _, e := range entries { + if e.EntryID == entryID { + t.Error("deleted entry should not be returned in normal reads") + } + } + + // Verify deleted_at is set via direct query + var deletedAt *int64 + err = db.Conn.QueryRow("SELECT deleted_at FROM entries WHERE entry_id = ?", entryID).Scan(&deletedAt) + if err != nil { + t.Fatalf("query deleted_at: %v", err) + } + if deletedAt == nil { + t.Error("deleted_at should be set") + } +} + +func TestOptimisticLocking(t *testing.T) { + db, cfg := testDB(t) + + // Setup + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, owner) + projectID := testProject(t, db, cfg, ownerID) + + // Create entry v1 + entry := &Entry{ + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "Version 1", + } + EntryWrite(db, cfg, ownerID, entry) + if entry.Version != 1 { + t.Fatalf("initial version should be 1, got %d", entry.Version) + } + + // Update successfully to v2 + entry.SummaryText = "Version 2" + if err := EntryWrite(db, cfg, ownerID, entry); err != nil { + t.Fatalf("EntryWrite v2: %v", err) + } + if entry.Version != 2 { + t.Fatalf("version should be 2, got %d", entry.Version) + } + + // Try to update with stale version (v1) + staleEntry := &Entry{ + EntryID: 
entry.EntryID, + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "Stale update", + Version: 1, // Stale! + } + err := EntryWrite(db, cfg, ownerID, staleEntry) + if err != ErrVersionConflict { + t.Errorf("expected ErrVersionConflict, got %v", err) + } +} + +func TestBlindIndexSearch(t *testing.T) { + db, cfg := testDB(t) + + // Setup + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, owner) + projectID := testProject(t, db, cfg, ownerID) + + // Create entry with search key + hmacKey, _ := DeriveHMACKey(cfg.MasterKey, projectID) + searchTerm := "unique-search-term-123" + blindIdx := BlindIndex(hmacKey, searchTerm) + + entry := &Entry{ + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "Searchable Entry", + SearchKey: blindIdx, + } + EntryWrite(db, cfg, ownerID, entry) + + // Search by blind index + filter := EntryFilter{ + ProjectID: projectID, + SearchKey: blindIdx, + } + entries, err := EntryRead(db, cfg, ownerID, projectID, filter) + if err != nil { + t.Fatalf("EntryRead with SearchKey: %v", err) + } + + if len(entries) != 1 { + t.Fatalf("expected 1 entry, got %d", len(entries)) + } + if entries[0].EntryID != entry.EntryID { + t.Error("wrong entry returned") + } + + // Search with different blind index should return nothing + wrongIdx := BlindIndex(hmacKey, "wrong-term") + filter.SearchKey = wrongIdx + entries, err = EntryRead(db, cfg, ownerID, projectID, filter) + if err != nil { + t.Fatalf("EntryRead with wrong SearchKey: %v", err) + } + if len(entries) != 0 { + t.Errorf("expected 0 entries with wrong search key, got %d", len(entries)) + } +} + +func TestUserByEmail(t *testing.T) { + db, _ := testDB(t) + + now := time.Now().UnixMilli() + user := &User{ + UserID: 
uuid.New().String(), + Email: "test@example.com", + Name: "Test User", + Password: "$2a$10$test", + OrgID: "test-org", + OrgName: "Test Org", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + + if err := UserCreate(db, user); err != nil { + t.Fatalf("UserCreate: %v", err) + } + + // Find by email + found, err := UserByEmail(db, "test@example.com") + if err != nil { + t.Fatalf("UserByEmail: %v", err) + } + if found == nil { + t.Fatal("user not found") + } + if found.UserID != user.UserID { + t.Error("wrong user returned") + } + + // Not found + notFound, err := UserByEmail(db, "nonexistent@example.com") + if err != nil { + t.Fatalf("UserByEmail nonexistent: %v", err) + } + if notFound != nil { + t.Error("should return nil for nonexistent email") + } +} + +func TestSessionCreateAndRevoke(t *testing.T) { + db, _ := testDB(t) + + now := time.Now().UnixMilli() + user := &User{ + UserID: uuid.New().String(), + Email: "session@test.com", + Name: "Session User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, user) + + session := &Session{ + ID: uuid.New().String(), + UserID: user.UserID, + Fingerprint: "test-fingerprint", + CreatedAt: now, + ExpiresAt: now + 86400000, // +1 day + Revoked: false, + } + + if err := SessionCreate(db, session); err != nil { + t.Fatalf("SessionCreate: %v", err) + } + + // Retrieve session + found, err := SessionByID(db, session.ID) + if err != nil { + t.Fatalf("SessionByID: %v", err) + } + if found == nil { + t.Fatal("session not found") + } + if found.Revoked { + t.Error("session should not be revoked") + } + + // Revoke + if err := SessionRevoke(db, session.ID); err != nil { + t.Fatalf("SessionRevoke: %v", err) + } + + // Check revoked + found, _ = SessionByID(db, session.ID) + if !found.Revoked { + t.Error("session should be revoked") + } +} + +func TestBuyerCannotSeePreDataroom(t *testing.T) { + db, cfg := testDB(t) + + // Setup owner + ownerID := uuid.New().String() + now := 
time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, owner) + projectID := testProject(t, db, cfg, ownerID) + + // Create pre_dataroom entry + preEntry := &Entry{ + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StagePreDataroom, + SummaryText: "Pre-dataroom entry", + } + EntryWrite(db, cfg, ownerID, preEntry) + + // Create dataroom entry + drEntry := &Entry{ + ProjectID: projectID, + Type: TypeRequest, + Depth: 1, + Stage: StageDataroom, + SummaryText: "Dataroom entry", + } + EntryWrite(db, cfg, ownerID, drEntry) + + // Create buyer user + buyerID := testUser(t, db, cfg, projectID, RoleBuyerMember) + + // Buyer reads entries + filter := EntryFilter{ProjectID: projectID, Type: TypeRequest} + entries, err := EntryRead(db, cfg, buyerID, projectID, filter) + if err != nil { + t.Fatalf("EntryRead as buyer: %v", err) + } + + // Should only see dataroom entry + if len(entries) != 1 { + t.Errorf("buyer should see 1 entry (dataroom only), got %d", len(entries)) + } + for _, e := range entries { + if e.Stage == StagePreDataroom { + t.Error("buyer should not see pre_dataroom entries") + } + } + + // Owner should see both + entries, _ = EntryRead(db, cfg, ownerID, projectID, filter) + if len(entries) != 2 { + t.Errorf("owner should see 2 entries, got %d", len(entries)) + } +} + +func TestLocalStore(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "dealspace-store-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + store, err := NewLocalStore(tmpDir) + if err != nil { + t.Fatalf("NewLocalStore: %v", err) + } + + // Write + data := []byte("test object data") + id := "abcdef1234567890" + if err := store.Write(id, data); err != nil { + t.Fatalf("Write: %v", err) + } + + // Exists + if !store.Exists(id) { + t.Error("object should exist") + } + + // Read + read, err := store.Read(id) + if 
err != nil { + t.Fatalf("Read: %v", err) + } + if !bytes.Equal(read, data) { + t.Error("data mismatch") + } + + // Delete + if err := store.Delete(id); err != nil { + t.Fatalf("Delete: %v", err) + } + if store.Exists(id) { + t.Error("object should not exist after delete") + } + + // Read nonexistent + _, err = store.Read("nonexistent") + if err != ErrObjectNotFound { + t.Errorf("expected ErrObjectNotFound, got %v", err) + } +} diff --git a/lib/mailer.go b/lib/mailer.go new file mode 100644 index 0000000..85b2beb --- /dev/null +++ b/lib/mailer.go @@ -0,0 +1,282 @@ +package lib + +import ( + "bytes" + "fmt" + "html/template" + "net/smtp" + "os" + "path/filepath" + "strconv" + "strings" +) + +// Mailer handles sending emails via SMTP. +type Mailer struct { + Host string + Port int + User string + Pass string + From string + templates *template.Template + enabled bool +} + +// NewMailer creates a new Mailer from environment variables. +// If SMTP_HOST is empty, returns a no-op mailer. +func NewMailer(cfg *Config) *Mailer { + host := os.Getenv("SMTP_HOST") + if host == "" { + return &Mailer{enabled: false} + } + + portStr := os.Getenv("SMTP_PORT") + port := 587 // default + if portStr != "" { + if p, err := strconv.Atoi(portStr); err == nil { + port = p + } + } + + from := os.Getenv("SMTP_FROM") + if from == "" { + from = "noreply@muskepo.com" + } + + m := &Mailer{ + Host: host, + Port: port, + User: os.Getenv("SMTP_USER"), + Pass: os.Getenv("SMTP_PASS"), + From: from, + enabled: true, + } + + return m +} + +// LoadTemplates loads email templates from the given directory. +// Templates should be in portal/emails/ directory. 
+func (m *Mailer) LoadTemplates(templateDir string) error { + if !m.enabled { + return nil + } + + // Parse all *.html email templates in the directory in one pass + pattern := filepath.Join(templateDir, "*.html") + tmpl, err := template.New("").Funcs(emailFuncs()).ParseGlob(pattern) + if err != nil { + return fmt.Errorf("parse email templates: %w", err) + } + m.templates = tmpl + return nil +} + +// emailFuncs returns template functions for email templates. +func emailFuncs() template.FuncMap { + return template.FuncMap{ + "gt": func(a, b int) bool { return a > b }, + "sub": func(a, b int) int { return a - b }, + "truncate": func(s string, n int) string { + if len(s) <= n { + return s + } + return s[:n] + "..." + }, + } +} + +// Enabled returns true if the mailer is configured and can send emails. +func (m *Mailer) Enabled() bool { + return m.enabled +} + +// Send sends an email with the given HTML body. +func (m *Mailer) Send(to, subject, htmlBody string) error { + if !m.enabled { + return nil // no-op + } + + // Build email message + msg := m.buildMessage(to, subject, htmlBody) + + // Connect and send + addr := fmt.Sprintf("%s:%d", m.Host, m.Port) + + var auth smtp.Auth + if m.User != "" && m.Pass != "" { + auth = smtp.PlainAuth("", m.User, m.Pass, m.Host) + } + + err := smtp.SendMail(addr, auth, m.From, []string{to}, msg) + if err != nil { + return fmt.Errorf("send mail to %s: %w", to, err) + } + + return nil +} + +// SendTemplate renders a template and sends it as an email. +func (m *Mailer) SendTemplate(to, subject, tmplName string, data any) error { + if !m.enabled { + return nil // no-op + } + + if m.templates == nil { + return fmt.Errorf("templates not loaded") + } + + // Render template + var buf bytes.Buffer + if err := m.templates.ExecuteTemplate(&buf, tmplName, data); err != nil { + return fmt.Errorf("render template %s: %w", tmplName, err) + } + + return m.Send(to, subject, buf.String()) +} + +// RenderTemplate renders a template to a string without sending. 
+// Useful for testing and previewing emails. +func (m *Mailer) RenderTemplate(tmplName string, data any) (string, error) { + if m.templates == nil { + return "", fmt.Errorf("templates not loaded") + } + + var buf bytes.Buffer + if err := m.templates.ExecuteTemplate(&buf, tmplName, data); err != nil { + return "", fmt.Errorf("render template %s: %w", tmplName, err) + } + + return buf.String(), nil +} + +// buildMessage constructs an RFC 2822 email message. +func (m *Mailer) buildMessage(to, subject, htmlBody string) []byte { + var buf bytes.Buffer + + // Headers + buf.WriteString("MIME-Version: 1.0\r\n") + buf.WriteString("Content-Type: text/html; charset=\"UTF-8\"\r\n") + buf.WriteString(fmt.Sprintf("From: Dealspace <%s>\r\n", m.From)) + buf.WriteString(fmt.Sprintf("To: %s\r\n", to)) + buf.WriteString(fmt.Sprintf("Subject: %s\r\n", m.encodeSubject(subject))) + buf.WriteString("\r\n") + + // Body + buf.WriteString(htmlBody) + + return buf.Bytes() +} + +// encodeSubject encodes the subject line for non-ASCII characters. +func (m *Mailer) encodeSubject(subject string) string { + // Check if subject contains non-ASCII + for _, r := range subject { + if r > 127 { + // Use RFC 2047 encoding + return "=?UTF-8?B?" + base64Encode(subject) + "?=" + } + } + return subject +} + +// base64Encode encodes a string to base64. 
+func base64Encode(s string) string { + const base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + data := []byte(s) + var result strings.Builder + for i := 0; i < len(data); i += 3 { + var n uint32 + remain := len(data) - i + if remain >= 3 { + n = uint32(data[i])<<16 | uint32(data[i+1])<<8 | uint32(data[i+2]) + result.WriteByte(base64Chars[n>>18&63]) + result.WriteByte(base64Chars[n>>12&63]) + result.WriteByte(base64Chars[n>>6&63]) + result.WriteByte(base64Chars[n&63]) + } else if remain == 2 { + n = uint32(data[i])<<16 | uint32(data[i+1])<<8 + result.WriteByte(base64Chars[n>>18&63]) + result.WriteByte(base64Chars[n>>12&63]) + result.WriteByte(base64Chars[n>>6&63]) + result.WriteByte('=') + } else { + n = uint32(data[i]) << 16 + result.WriteByte(base64Chars[n>>18&63]) + result.WriteByte(base64Chars[n>>12&63]) + result.WriteString("==") + } + } + return result.String() +} + +// ---- Email Data Structures ---- + +// InviteData is the data for invite.html template. +type InviteData struct { + InviterName string + InviterOrg string + ProjectName string + InviteURL string + RecipientName string + Role string + ExpiresIn string +} + +// TasksAssignedData is the data for tasks_assigned.html template. +type TasksAssignedData struct { + RecipientName string + ProjectName string + Count int + Tasks []TaskItem + TasksURL string +} + +// TaskItem represents a single task in the tasks list. +type TaskItem struct { + Title string + DueDate string + Priority string +} + +// AnswerSubmittedData is the data for answer_submitted.html template. +type AnswerSubmittedData struct { + RecipientName string + AnswererName string + RequestTitle string + WorkstreamName string + AnswerPreview string + ReviewURL string +} + +// AnswerApprovedData is the data for answer_approved.html template. 
+type AnswerApprovedData struct { + RecipientName string + RequestTitle string + Published bool + DataRoomURL string +} + +// AnswerRejectedData is the data for answer_rejected.html template. +type AnswerRejectedData struct { + RecipientName string + RequestTitle string + Reason string + RequestURL string +} + +// RequestForwardedData is the data for request_forwarded.html template. +type RequestForwardedData struct { + RecipientName string + SenderName string + RequestTitle string + RequestURL string + DueDate string + HasDueDate bool +} + +// WelcomeData is the data for welcome.html template. +type WelcomeData struct { + RecipientName string + TasksURL string +} diff --git a/lib/rbac_test.go b/lib/rbac_test.go new file mode 100644 index 0000000..5ac8385 --- /dev/null +++ b/lib/rbac_test.go @@ -0,0 +1,387 @@ +package lib + +import ( + "testing" + "time" + + "github.com/google/uuid" +) + +func TestCheckAccess(t *testing.T) { + db, cfg := testDB(t) + + // Create owner + ownerID := uuid.New().String() + now := time.Now().UnixMilli() + owner := &User{ + UserID: ownerID, + Email: "owner@test.com", + Name: "Owner", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, owner) + projectID := testProject(t, db, cfg, ownerID) + + // IB admin can read + err := CheckAccessRead(db, ownerID, projectID, "") + if err != nil { + t.Errorf("IB admin should have read access: %v", err) + } + + // IB admin can write + err = CheckAccessWrite(db, ownerID, projectID, "") + if err != nil { + t.Errorf("IB admin should have write access: %v", err) + } + + // IB admin can delete + err = CheckAccessDelete(db, ownerID, projectID, "") + if err != nil { + t.Errorf("IB admin should have delete access: %v", err) + } + + // Create seller user + sellerID := testUser(t, db, cfg, projectID, RoleSellerMember) + + // Seller can read + err = CheckAccessRead(db, sellerID, projectID, "") + if err != nil { + t.Errorf("seller should have read access: %v", err) + } 
+ + // Seller can write + err = CheckAccessWrite(db, sellerID, projectID, "") + if err != nil { + t.Errorf("seller should have write access: %v", err) + } + + // Create buyer user (read-only) + buyerID := testUser(t, db, cfg, projectID, RoleBuyerMember) + + // Buyer can read + err = CheckAccessRead(db, buyerID, projectID, "") + if err != nil { + t.Errorf("buyer should have read access: %v", err) + } + + // Buyer cannot write + err = CheckAccessWrite(db, buyerID, projectID, "") + if err != ErrAccessDenied { + t.Errorf("buyer should NOT have write access, got: %v", err) + } +} + +func TestRoleHierarchy(t *testing.T) { + // Verify hierarchy levels: buyer < seller_member < seller_admin < ib_analyst < ib_member < ib_admin + expected := []struct { + role string + level int + }{ + {RoleObserver, 10}, + {RoleBuyerMember, 30}, + {RoleBuyerAdmin, 40}, + {RoleSellerMember, 50}, + {RoleSellerAdmin, 70}, + {RoleIBMember, 80}, + {RoleIBAdmin, 100}, + } + + for _, tc := range expected { + level, ok := RoleHierarchy[tc.role] + if !ok { + t.Errorf("role %s not in hierarchy", tc.role) + continue + } + if level != tc.level { + t.Errorf("role %s: expected level %d, got %d", tc.role, tc.level, level) + } + } + + // Verify ordering + if RoleHierarchy[RoleBuyerMember] >= RoleHierarchy[RoleSellerMember] { + t.Error("buyer should be lower than seller_member") + } + if RoleHierarchy[RoleSellerMember] >= RoleHierarchy[RoleSellerAdmin] { + t.Error("seller_member should be lower than seller_admin") + } + if RoleHierarchy[RoleSellerAdmin] >= RoleHierarchy[RoleIBMember] { + t.Error("seller_admin should be lower than ib_member") + } + if RoleHierarchy[RoleIBMember] >= RoleHierarchy[RoleIBAdmin] { + t.Error("ib_member should be lower than ib_admin") + } +} + +func TestCanGrantRole(t *testing.T) { + tests := []struct { + granter string + target string + canDo bool + }{ + // IB admin can grant anything + {RoleIBAdmin, RoleIBAdmin, true}, + {RoleIBAdmin, RoleIBMember, true}, + {RoleIBAdmin, 
RoleSellerAdmin, true}, + {RoleIBAdmin, RoleBuyerMember, true}, + + // IB member can grant lower roles + {RoleIBMember, RoleIBAdmin, false}, + {RoleIBMember, RoleIBMember, true}, + {RoleIBMember, RoleSellerAdmin, true}, + + // Seller admin can grant seller and buyer roles + {RoleSellerAdmin, RoleIBMember, false}, + {RoleSellerAdmin, RoleSellerAdmin, true}, + {RoleSellerAdmin, RoleSellerMember, true}, + {RoleSellerAdmin, RoleBuyerMember, true}, + + // Buyer cannot grant higher roles + {RoleBuyerAdmin, RoleSellerMember, false}, + {RoleBuyerAdmin, RoleBuyerAdmin, true}, + {RoleBuyerAdmin, RoleBuyerMember, true}, + } + + for _, tc := range tests { + result := CanGrantRole(tc.granter, tc.target) + if result != tc.canDo { + t.Errorf("CanGrantRole(%s, %s) = %v, want %v", tc.granter, tc.target, result, tc.canDo) + } + } +} + +func TestGrantRevoke(t *testing.T) { + db, cfg := testDB(t) + + // Create admin + adminID := uuid.New().String() + now := time.Now().UnixMilli() + admin := &User{ + UserID: adminID, + Email: "admin@test.com", + Name: "Admin", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, admin) + projectID := testProject(t, db, cfg, adminID) + + // Create user with no access + userID := uuid.New().String() + user := &User{ + UserID: userID, + Email: "user@test.com", + Name: "User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, user) + + // Verify no access + err := CheckAccessRead(db, userID, projectID, "") + if err != ErrAccessDenied { + t.Error("user should have no access initially") + } + + // Grant access + accessID := uuid.New().String() + access := &Access{ + ID: accessID, + ProjectID: projectID, + UserID: userID, + Role: RoleBuyerMember, + Ops: "r", + CanGrant: false, + GrantedBy: adminID, + GrantedAt: now, + } + if err := AccessGrant(db, access); err != nil { + t.Fatalf("AccessGrant: %v", err) + } + + // Verify access granted + err = CheckAccessRead(db, 
userID, projectID, "") + if err != nil { + t.Errorf("user should have read access after grant: %v", err) + } + + // Revoke access + if err := AccessRevoke(db, accessID, adminID); err != nil { + t.Fatalf("AccessRevoke: %v", err) + } + + // Verify access revoked + err = CheckAccessRead(db, userID, projectID, "") + if err != ErrAccessDenied { + t.Error("user should have no access after revoke") + } +} + +func TestIsBuyerRole(t *testing.T) { + buyers := []string{RoleBuyerAdmin, RoleBuyerMember} + nonBuyers := []string{RoleIBAdmin, RoleIBMember, RoleSellerAdmin, RoleSellerMember, RoleObserver} + + for _, role := range buyers { + if !IsBuyerRole(role) { + t.Errorf("%s should be buyer role", role) + } + } + + for _, role := range nonBuyers { + if IsBuyerRole(role) { + t.Errorf("%s should NOT be buyer role", role) + } + } +} + +func TestGetUserHighestRole(t *testing.T) { + db, cfg := testDB(t) + + // Create admin and project + adminID := uuid.New().String() + now := time.Now().UnixMilli() + admin := &User{ + UserID: adminID, + Email: "admin@test.com", + Name: "Admin", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, admin) + projectID := testProject(t, db, cfg, adminID) + + // Admin's highest role should be ib_admin + role, err := GetUserHighestRole(db, adminID, projectID) + if err != nil { + t.Fatalf("GetUserHighestRole: %v", err) + } + if role != RoleIBAdmin { + t.Errorf("expected ib_admin, got %s", role) + } + + // Create user with multiple roles + userID := uuid.New().String() + user := &User{ + UserID: userID, + Email: "multi@test.com", + Name: "Multi Role User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, user) + + // Grant buyer role + AccessGrant(db, &Access{ + ID: uuid.New().String(), + ProjectID: projectID, + UserID: userID, + Role: RoleBuyerMember, + Ops: "r", + GrantedBy: adminID, + GrantedAt: now, + }) + + // Grant seller role (higher) + AccessGrant(db, 
&Access{ + ID: uuid.New().String(), + ProjectID: projectID, + UserID: userID, + Role: RoleSellerMember, + Ops: "rw", + GrantedBy: adminID, + GrantedAt: now, + }) + + // Highest should be seller_member + role, err = GetUserHighestRole(db, userID, projectID) + if err != nil { + t.Fatalf("GetUserHighestRole: %v", err) + } + if role != RoleSellerMember { + t.Errorf("expected seller_member (highest), got %s", role) + } + + // User with no access + noAccessID := uuid.New().String() + noAccessUser := &User{ + UserID: noAccessID, + Email: "noaccess@test.com", + Name: "No Access", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, noAccessUser) + + _, err = GetUserHighestRole(db, noAccessID, projectID) + if err != ErrAccessDenied { + t.Errorf("expected ErrAccessDenied for user with no access, got %v", err) + } +} + +func TestWorkstreamAccess(t *testing.T) { + db, cfg := testDB(t) + + // Create admin and project + adminID := uuid.New().String() + now := time.Now().UnixMilli() + admin := &User{ + UserID: adminID, + Email: "admin@test.com", + Name: "Admin", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, admin) + projectID := testProject(t, db, cfg, adminID) + + // Create user with access to specific workstream only + userID := uuid.New().String() + user := &User{ + UserID: userID, + Email: "ws@test.com", + Name: "Workstream User", + Password: "$2a$10$test", + Active: true, + CreatedAt: now, + UpdatedAt: now, + } + UserCreate(db, user) + + workstreamID := "workstream-1" + AccessGrant(db, &Access{ + ID: uuid.New().String(), + ProjectID: projectID, + WorkstreamID: workstreamID, + UserID: userID, + Role: RoleSellerMember, + Ops: "rw", + GrantedBy: adminID, + GrantedAt: now, + }) + + // User has access to their workstream + _, err := CheckAccess(db, userID, projectID, workstreamID, "r") + if err != nil { + t.Errorf("user should have access to their workstream: %v", err) + } + + // 
User should NOT have access to different workstream + _, err = CheckAccess(db, userID, projectID, "different-workstream", "r") + if err != ErrAccessDenied { + t.Error("user should NOT have access to different workstream") + } +} diff --git a/lib/watermark.go b/lib/watermark.go index 63cc002..d63cdbc 100644 --- a/lib/watermark.go +++ b/lib/watermark.go @@ -1,43 +1,571 @@ package lib +import ( + "archive/zip" + "bytes" + "fmt" + "image" + "image/color" + "image/draw" + "image/gif" + "image/jpeg" + "image/png" + "io" + "regexp" + "strings" + + "github.com/pdfcpu/pdfcpu/pkg/api" + "github.com/pdfcpu/pdfcpu/pkg/pdfcpu/model" + "github.com/pdfcpu/pdfcpu/pkg/pdfcpu/types" +) + // Watermark applies per-request watermarks at serve time. // Stored files are always clean originals; watermarks are injected on read. // // Supported types: -// - PDF: dynamic watermark (user + timestamp + org) -// - Word (.docx): watermark injected into document XML -// - Excel (.xlsx): sheet protection + watermark header row -// - Images: watermark text burned into pixel data -// - Video: watermark overlay via ffmpeg, served as stream -// - Other: encrypted download only, no preview +// - PDF: diagonal text watermark (pdfcpu) +// - Word (.docx): watermark shape in header +// - Images: text overlay in bottom-right +// - Other: pass-through unchanged -// WatermarkConfig holds per-project watermark settings. -type WatermarkConfig struct { - Template string // e.g. "{user_name} · {org_name} · {datetime} · CONFIDENTIAL" - Opacity float64 - Position string // "diagonal" | "footer" | "header" +// Watermark dispatches to the appropriate watermarking function based on MIME type. 
+func Watermark(in []byte, mimeType string, label string) ([]byte, error) { + switch mimeType { + case "application/pdf": + return WatermarkPDF(in, label) + case "image/jpeg", "image/png", "image/gif": + return WatermarkImage(in, mimeType, label) + case "application/vnd.openxmlformats-officedocument.wordprocessingml.document": + return WatermarkDOCX(in, label) + default: + return in, nil // pass through unsupported types + } } -// WatermarkPDF applies a watermark to PDF data. Stub. -func WatermarkPDF(data []byte, userName, orgName string, wc *WatermarkConfig) ([]byte, error) { - // TODO: implement PDF watermarking - return data, nil +// WatermarkPDF applies a diagonal text watermark to every page of a PDF. +// Uses pdfcpu for PDF manipulation. +// Label format: "CONFIDENTIAL — {UserName} — {Date} — {ProjectName}" +func WatermarkPDF(in []byte, label string) ([]byte, error) { + if len(in) == 0 { + return nil, fmt.Errorf("empty input") + } + + // Build watermark description string for pdfcpu + // Format: "text, fontname:Helvetica, fontsize:36, color:0.5 0.5 0.5, opacity:0.3, rotation:45, diagonal:1, scale:1 abs" + wmDesc := fmt.Sprintf("%s, fontname:Helvetica, fontsize:36, color:0.5 0.5 0.5, opacity:0.3, rotation:45, diagonal:1, scale:1 abs", + escapeWatermarkText(label)) + + wm, err := api.TextWatermark(wmDesc, "", true, false, types.POINTS) + if err != nil { + return nil, fmt.Errorf("create watermark: %w", err) + } + + // Apply watermark to all pages + var out bytes.Buffer + inReader := bytes.NewReader(in) + + conf := model.NewDefaultConfiguration() + conf.ValidationMode = model.ValidationRelaxed + + if err := api.AddWatermarks(inReader, &out, nil, wm, conf); err != nil { + return nil, fmt.Errorf("apply watermark: %w", err) + } + + return out.Bytes(), nil } -// WatermarkDOCX applies a watermark to a Word document. Stub. 
-func WatermarkDOCX(data []byte, userName, orgName string, wc *WatermarkConfig) ([]byte, error) { - // TODO: implement DOCX watermarking - return data, nil +// escapeWatermarkText escapes special characters for pdfcpu watermark text. +func escapeWatermarkText(text string) string { + // Escape commas and colons which have special meaning in pdfcpu + text = strings.ReplaceAll(text, ",", "\\,") + text = strings.ReplaceAll(text, ":", "\\:") + return text } -// WatermarkXLSX applies a watermark to an Excel spreadsheet. Stub. -func WatermarkXLSX(data []byte, userName, orgName string, wc *WatermarkConfig) ([]byte, error) { - // TODO: implement XLSX watermarking - return data, nil +// WatermarkImage applies a text watermark to an image. +// Supports JPEG, PNG, and GIF (first frame only for GIF). +// The watermark is placed in the bottom-right corner with semi-transparent white text and dark shadow. +func WatermarkImage(in []byte, mimeType string, label string) ([]byte, error) { + if len(in) == 0 { + return nil, fmt.Errorf("empty input") + } + + reader := bytes.NewReader(in) + + // Handle GIF specially (only watermark first frame) + if mimeType == "image/gif" { + return watermarkGIF(reader, label) + } + + // Decode image + img, format, err := image.Decode(reader) + if err != nil { + return nil, fmt.Errorf("decode image: %w", err) + } + + // Create a new RGBA image to draw on + bounds := img.Bounds() + rgba := image.NewRGBA(bounds) + draw.Draw(rgba, bounds, img, bounds.Min, draw.Src) + + // Draw watermark text + drawWatermarkText(rgba, label) + + // Encode output + var out bytes.Buffer + switch format { + case "jpeg": + if err := jpeg.Encode(&out, rgba, &jpeg.Options{Quality: 90}); err != nil { + return nil, fmt.Errorf("encode jpeg: %w", err) + } + case "png": + if err := png.Encode(&out, rgba); err != nil { + return nil, fmt.Errorf("encode png: %w", err) + } + default: + // For other formats decoded by image.Decode, output as PNG + if err := png.Encode(&out, rgba); err != 
nil { + return nil, fmt.Errorf("encode png: %w", err) + } + } + + return out.Bytes(), nil } -// WatermarkImage applies a watermark to image data. Stub. -func WatermarkImage(data []byte, userName, orgName string, wc *WatermarkConfig) ([]byte, error) { - // TODO: implement image watermarking - return data, nil +// watermarkGIF handles GIF watermarking (first frame only). +func watermarkGIF(reader io.Reader, label string) ([]byte, error) { + g, err := gif.DecodeAll(reader) + if err != nil { + return nil, fmt.Errorf("decode gif: %w", err) + } + + if len(g.Image) == 0 { + return nil, fmt.Errorf("gif has no frames") + } + + // Watermark first frame + firstFrame := g.Image[0] + bounds := firstFrame.Bounds() + rgba := image.NewRGBA(bounds) + draw.Draw(rgba, bounds, firstFrame, bounds.Min, draw.Src) + + drawWatermarkText(rgba, label) + + // Convert back to paletted image + paletted := image.NewPaletted(bounds, firstFrame.Palette) + draw.Draw(paletted, bounds, rgba, bounds.Min, draw.Src) + g.Image[0] = paletted + + var out bytes.Buffer + if err := gif.EncodeAll(&out, g); err != nil { + return nil, fmt.Errorf("encode gif: %w", err) + } + + return out.Bytes(), nil +} + +// drawWatermarkText draws watermark text on an RGBA image. +// Uses a simple pixel-based text rendering (8x8 bitmap font style). +// Text is placed in bottom-right corner with shadow effect. 
+func drawWatermarkText(img *image.RGBA, label string) { + bounds := img.Bounds() + width := bounds.Dx() + height := bounds.Dy() + + // Calculate font scale based on image size + scale := 1 + if width > 1000 { + scale = 2 + } + if width > 2000 { + scale = 3 + } + + // Measure text + charWidth := 6 * scale + charHeight := 10 * scale + textWidth := len(label) * charWidth + textHeight := charHeight + + // Position: bottom-right with padding + padding := 10 * scale + x := width - textWidth - padding + y := height - textHeight - padding + + // Clamp to image bounds + if x < padding { + x = padding + } + if y < padding { + y = padding + } + + // Draw shadow (dark, offset by 1-2 pixels) + shadowColor := color.RGBA{0, 0, 0, 180} + for i := 1; i <= scale; i++ { + drawText(img, label, x+i, y+i, charWidth, charHeight, shadowColor, scale) + } + + // Draw main text (semi-transparent white) + textColor := color.RGBA{255, 255, 255, 200} + drawText(img, label, x, y, charWidth, charHeight, textColor, scale) +} + +// drawText draws text using a simple bitmap font approach. +// This is a basic implementation that renders readable ASCII text. +func drawText(img *image.RGBA, text string, startX, startY, charWidth, charHeight int, c color.Color, scale int) { + bounds := img.Bounds() + + for i, ch := range text { + x := startX + i*charWidth + if x >= bounds.Max.X { + break + } + + // Get character bitmap + bitmap := getCharBitmap(ch) + if bitmap == nil { + continue + } + + // Draw character + for row := 0; row < 8; row++ { + for col := 0; col < 5; col++ { + if bitmap[row]&(1<<(4-col)) != 0 { + // Draw scaled pixel + for sy := 0; sy < scale; sy++ { + for sx := 0; sx < scale; sx++ { + px := x + col*scale + sx + py := startY + row*scale + sy + if px >= bounds.Min.X && px < bounds.Max.X && py >= bounds.Min.Y && py < bounds.Max.Y { + img.Set(px, py, c) + } + } + } + } + } + } + } +} + +// getCharBitmap returns an 8-row bitmap for a character (5 bits per row). 
+// Basic ASCII font supporting uppercase, lowercase, digits, and common punctuation. +func getCharBitmap(ch rune) []byte { + // 5x8 bitmap font - each byte represents one row, 5 bits used + fonts := map[rune][]byte{ + 'A': {0x0E, 0x11, 0x11, 0x1F, 0x11, 0x11, 0x11, 0x00}, + 'B': {0x1E, 0x11, 0x11, 0x1E, 0x11, 0x11, 0x1E, 0x00}, + 'C': {0x0E, 0x11, 0x10, 0x10, 0x10, 0x11, 0x0E, 0x00}, + 'D': {0x1E, 0x11, 0x11, 0x11, 0x11, 0x11, 0x1E, 0x00}, + 'E': {0x1F, 0x10, 0x10, 0x1E, 0x10, 0x10, 0x1F, 0x00}, + 'F': {0x1F, 0x10, 0x10, 0x1E, 0x10, 0x10, 0x10, 0x00}, + 'G': {0x0E, 0x11, 0x10, 0x17, 0x11, 0x11, 0x0E, 0x00}, + 'H': {0x11, 0x11, 0x11, 0x1F, 0x11, 0x11, 0x11, 0x00}, + 'I': {0x0E, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0E, 0x00}, + 'J': {0x07, 0x02, 0x02, 0x02, 0x02, 0x12, 0x0C, 0x00}, + 'K': {0x11, 0x12, 0x14, 0x18, 0x14, 0x12, 0x11, 0x00}, + 'L': {0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x1F, 0x00}, + 'M': {0x11, 0x1B, 0x15, 0x15, 0x11, 0x11, 0x11, 0x00}, + 'N': {0x11, 0x19, 0x15, 0x13, 0x11, 0x11, 0x11, 0x00}, + 'O': {0x0E, 0x11, 0x11, 0x11, 0x11, 0x11, 0x0E, 0x00}, + 'P': {0x1E, 0x11, 0x11, 0x1E, 0x10, 0x10, 0x10, 0x00}, + 'Q': {0x0E, 0x11, 0x11, 0x11, 0x15, 0x12, 0x0D, 0x00}, + 'R': {0x1E, 0x11, 0x11, 0x1E, 0x14, 0x12, 0x11, 0x00}, + 'S': {0x0E, 0x11, 0x10, 0x0E, 0x01, 0x11, 0x0E, 0x00}, + 'T': {0x1F, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00}, + 'U': {0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x0E, 0x00}, + 'V': {0x11, 0x11, 0x11, 0x11, 0x11, 0x0A, 0x04, 0x00}, + 'W': {0x11, 0x11, 0x11, 0x15, 0x15, 0x1B, 0x11, 0x00}, + 'X': {0x11, 0x11, 0x0A, 0x04, 0x0A, 0x11, 0x11, 0x00}, + 'Y': {0x11, 0x11, 0x0A, 0x04, 0x04, 0x04, 0x04, 0x00}, + 'Z': {0x1F, 0x01, 0x02, 0x04, 0x08, 0x10, 0x1F, 0x00}, + 'a': {0x00, 0x00, 0x0E, 0x01, 0x0F, 0x11, 0x0F, 0x00}, + 'b': {0x10, 0x10, 0x1E, 0x11, 0x11, 0x11, 0x1E, 0x00}, + 'c': {0x00, 0x00, 0x0E, 0x11, 0x10, 0x11, 0x0E, 0x00}, + 'd': {0x01, 0x01, 0x0F, 0x11, 0x11, 0x11, 0x0F, 0x00}, + 'e': {0x00, 0x00, 0x0E, 0x11, 0x1F, 0x10, 0x0E, 0x00}, + 'f': {0x06, 
0x09, 0x08, 0x1C, 0x08, 0x08, 0x08, 0x00}, + 'g': {0x00, 0x00, 0x0F, 0x11, 0x0F, 0x01, 0x0E, 0x00}, + 'h': {0x10, 0x10, 0x1E, 0x11, 0x11, 0x11, 0x11, 0x00}, + 'i': {0x04, 0x00, 0x0C, 0x04, 0x04, 0x04, 0x0E, 0x00}, + 'j': {0x02, 0x00, 0x06, 0x02, 0x02, 0x12, 0x0C, 0x00}, + 'k': {0x10, 0x10, 0x12, 0x14, 0x18, 0x14, 0x12, 0x00}, + 'l': {0x0C, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0E, 0x00}, + 'm': {0x00, 0x00, 0x1A, 0x15, 0x15, 0x15, 0x15, 0x00}, + 'n': {0x00, 0x00, 0x1E, 0x11, 0x11, 0x11, 0x11, 0x00}, + 'o': {0x00, 0x00, 0x0E, 0x11, 0x11, 0x11, 0x0E, 0x00}, + 'p': {0x00, 0x00, 0x1E, 0x11, 0x1E, 0x10, 0x10, 0x00}, + 'q': {0x00, 0x00, 0x0F, 0x11, 0x0F, 0x01, 0x01, 0x00}, + 'r': {0x00, 0x00, 0x16, 0x19, 0x10, 0x10, 0x10, 0x00}, + 's': {0x00, 0x00, 0x0F, 0x10, 0x0E, 0x01, 0x1E, 0x00}, + 't': {0x08, 0x08, 0x1C, 0x08, 0x08, 0x09, 0x06, 0x00}, + 'u': {0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x0F, 0x00}, + 'v': {0x00, 0x00, 0x11, 0x11, 0x11, 0x0A, 0x04, 0x00}, + 'w': {0x00, 0x00, 0x11, 0x11, 0x15, 0x15, 0x0A, 0x00}, + 'x': {0x00, 0x00, 0x11, 0x0A, 0x04, 0x0A, 0x11, 0x00}, + 'y': {0x00, 0x00, 0x11, 0x11, 0x0F, 0x01, 0x0E, 0x00}, + 'z': {0x00, 0x00, 0x1F, 0x02, 0x04, 0x08, 0x1F, 0x00}, + '0': {0x0E, 0x11, 0x13, 0x15, 0x19, 0x11, 0x0E, 0x00}, + '1': {0x04, 0x0C, 0x04, 0x04, 0x04, 0x04, 0x0E, 0x00}, + '2': {0x0E, 0x11, 0x01, 0x02, 0x04, 0x08, 0x1F, 0x00}, + '3': {0x0E, 0x11, 0x01, 0x06, 0x01, 0x11, 0x0E, 0x00}, + '4': {0x02, 0x06, 0x0A, 0x12, 0x1F, 0x02, 0x02, 0x00}, + '5': {0x1F, 0x10, 0x1E, 0x01, 0x01, 0x11, 0x0E, 0x00}, + '6': {0x06, 0x08, 0x10, 0x1E, 0x11, 0x11, 0x0E, 0x00}, + '7': {0x1F, 0x01, 0x02, 0x04, 0x08, 0x08, 0x08, 0x00}, + '8': {0x0E, 0x11, 0x11, 0x0E, 0x11, 0x11, 0x0E, 0x00}, + '9': {0x0E, 0x11, 0x11, 0x0F, 0x01, 0x02, 0x0C, 0x00}, + ' ': {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + '.': {0x00, 0x00, 0x00, 0x00, 0x00, 0x0C, 0x0C, 0x00}, + ',': {0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x04}, + '-': {0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00}, + '_': {0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x00}, + ':': {0x00, 0x0C, 0x0C, 0x00, 0x0C, 0x0C, 0x00, 0x00}, + '/': {0x01, 0x02, 0x02, 0x04, 0x08, 0x08, 0x10, 0x00}, + '@': {0x0E, 0x11, 0x17, 0x15, 0x17, 0x10, 0x0E, 0x00}, + '(': {0x02, 0x04, 0x08, 0x08, 0x08, 0x04, 0x02, 0x00}, + ')': {0x08, 0x04, 0x02, 0x02, 0x02, 0x04, 0x08, 0x00}, + } + + if bitmap, ok := fonts[ch]; ok { + return bitmap + } + // Return a small square for unknown characters + return []byte{0x00, 0x1F, 0x11, 0x11, 0x11, 0x1F, 0x00, 0x00} +} + +// WatermarkDOCX adds a text watermark to a Word document. +// DOCX is a ZIP file; we unzip, modify word/document.xml to add a watermark shape in the header, and rezip. +func WatermarkDOCX(in []byte, label string) ([]byte, error) { + if len(in) == 0 { + return nil, fmt.Errorf("empty input") + } + + reader := bytes.NewReader(in) + zipReader, err := zip.NewReader(reader, int64(len(in))) + if err != nil { + return nil, fmt.Errorf("open zip: %w", err) + } + + var out bytes.Buffer + zipWriter := zip.NewWriter(&out) + + // Track which relationship files we've seen + var hasHeader1 bool + headerRelId := "rIdWatermarkHeader" + + // First pass: check if header1.xml exists + for _, f := range zipReader.File { + if f.Name == "word/header1.xml" { + hasHeader1 = true + break + } + } + + for _, f := range zipReader.File { + rc, err := f.Open() + if err != nil { + return nil, fmt.Errorf("open %s: %w", f.Name, err) + } + + content, err := io.ReadAll(rc) + rc.Close() + if err != nil { + return nil, fmt.Errorf("read %s: %w", f.Name, err) + } + + // Modify content based on file + switch f.Name { + case "word/document.xml": + // Add header reference to document if not already present + if !hasHeader1 { + content = addHeaderReferenceToDocument(content, headerRelId) + } + case "word/header1.xml": + // Add watermark to existing header + content = addWatermarkToHeader(content, label) + case "word/_rels/document.xml.rels": + // Add relationship for header if we created one + if !hasHeader1 
{ + content = addHeaderRelationship(content, headerRelId) + } + case "[Content_Types].xml": + // Ensure header content type is registered + if !hasHeader1 { + content = ensureHeaderContentType(content) + } + } + + // Write modified content + w, err := zipWriter.Create(f.Name) + if err != nil { + return nil, fmt.Errorf("create %s: %w", f.Name, err) + } + if _, err := w.Write(content); err != nil { + return nil, fmt.Errorf("write %s: %w", f.Name, err) + } + } + + // If no header existed, create one with the watermark + if !hasHeader1 { + header := createWatermarkHeader(label) + w, err := zipWriter.Create("word/header1.xml") + if err != nil { + return nil, fmt.Errorf("create header: %w", err) + } + if _, err := w.Write(header); err != nil { + return nil, fmt.Errorf("write header: %w", err) + } + } + + if err := zipWriter.Close(); err != nil { + return nil, fmt.Errorf("close zip: %w", err) + } + + return out.Bytes(), nil +} + +// createWatermarkHeader creates a new header XML with a diagonal watermark shape. +func createWatermarkHeader(label string) []byte { + // VML shape for diagonal watermark text + // The shape uses a text path to render the watermark diagonally + header := fmt.Sprintf(` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +`, escapeXML(label)) + return []byte(header) +} + +// addWatermarkToHeader adds a watermark shape to an existing header. +func addWatermarkToHeader(content []byte, label string) []byte { + // Insert watermark shape before + watermarkShape := fmt.Sprintf(` + + + + + + + + + + + + + + `, escapeXML(label)) + + // Find and insert before it + str := string(content) + if idx := strings.LastIndex(str, ""); idx != -1 { + str = str[:idx] + watermarkShape + str[idx:] + } + return []byte(str) +} + +// addHeaderReferenceToDocument adds a header reference to the document.xml. 
+func addHeaderReferenceToDocument(content []byte, relId string) []byte { + str := string(content) + + // Find the sectPr element and add headerReference + // Look for + headerRef := fmt.Sprintf(``, relId) + + sectPrPattern := regexp.MustCompile(`(]*>)`) + if sectPrPattern.MatchString(str) { + str = sectPrPattern.ReplaceAllString(str, "${1}"+headerRef) + } else { + // No sectPr exists, add one before + sectPr := fmt.Sprintf(`%s`, headerRef) + if idx := strings.LastIndex(str, ""); idx != -1 { + str = str[:idx] + sectPr + str[idx:] + } + } + + // Ensure r namespace is declared + if !strings.Contains(str, `xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"`) { + str = strings.Replace(str, ``, relId) + + // Insert before + if idx := strings.LastIndex(str, ""); idx != -1 { + str = str[:idx] + rel + str[idx:] + } + return []byte(str) +} + +// ensureHeaderContentType ensures the header content type is in [Content_Types].xml. +func ensureHeaderContentType(content []byte) []byte { + str := string(content) + if strings.Contains(str, "header1.xml") { + return content + } + + override := `` + if idx := strings.LastIndex(str, ""); idx != -1 { + str = str[:idx] + override + str[idx:] + } + return []byte(str) +} + +// escapeXML escapes special XML characters. +func escapeXML(s string) string { + s = strings.ReplaceAll(s, "&", "&") + s = strings.ReplaceAll(s, "<", "<") + s = strings.ReplaceAll(s, ">", ">") + s = strings.ReplaceAll(s, "'", "'") + s = strings.ReplaceAll(s, "\"", """) + return s } diff --git a/portal/emails/answer_approved.html b/portal/emails/answer_approved.html new file mode 100644 index 0000000..3d47e58 --- /dev/null +++ b/portal/emails/answer_approved.html @@ -0,0 +1,85 @@ +{{define "answer_approved.html"}} + + + + + + Your answer was approved ✓ + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+ +
+
+ ✓ +
+
+ +

+ Your answer was approved +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ Great news! Your answer for {{.RequestTitle}} has been approved. +

+ + {{if .Published}} +
+

+ 📁 Published to Data Room — Your response is now visible to authorized buyers. +

+
+ {{end}} + + {{if .DataRoomURL}} + + + + + +
+ View in Data Room +
+ {{end}} + +

+ Thank you for your prompt response. Keep up the excellent work! +

+
+

+ Questions? Contact support@muskepo.com +

+

+ © 2026 Dealspace · Privacy Policy · Terms of Service +

+
+
+ + +{{end}} diff --git a/portal/emails/answer_rejected.html b/portal/emails/answer_rejected.html new file mode 100644 index 0000000..3a5da95 --- /dev/null +++ b/portal/emails/answer_rejected.html @@ -0,0 +1,76 @@ +{{define "answer_rejected.html"}} + + + + + + Your answer needs revision + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+

+ Your answer needs revision +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ Your answer for {{.RequestTitle}} requires some changes before it can be approved. +

+ + + {{if .Reason}} +
+

Feedback

+

{{.Reason}}

+
+ {{end}} + + + + + + +
+ View Feedback +
+ +

+ Please update your response based on the feedback above. If you have any questions, you can reply directly in the request thread. +

+
+

+ Questions? Contact support@muskepo.com +

+

+ © 2026 Dealspace · Privacy Policy · Terms of Service +

+
+
+ + +{{end}} diff --git a/portal/emails/answer_submitted.html b/portal/emails/answer_submitted.html new file mode 100644 index 0000000..a496ead --- /dev/null +++ b/portal/emails/answer_submitted.html @@ -0,0 +1,92 @@ +{{define "answer_submitted.html"}} + + + + + + {{.AnswererName}} submitted an answer for: {{.RequestTitle}} + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+
+ ACTION REQUIRED +
+ +

+ New answer submitted for review +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ {{.AnswererName}} has submitted an answer that needs your review. +

+ + + + + + +
+

Request

+

{{.RequestTitle}}

+ + {{if .WorkstreamName}} +

Workstream

+

{{.WorkstreamName}}

+ {{end}} + + {{if .AnswerPreview}} +

Preview

+

"{{truncate .AnswerPreview 200}}"

+ {{end}} +
+ + + + + + +
+ Review Answer +
+ +

+ Once approved, this answer will be published to the data room and visible to authorized buyers. +

+
+

+ Questions? Contact support@muskepo.com +

+

+ © 2026 Dealspace · Privacy Policy · Terms of Service +

+
+
+ + +{{end}} diff --git a/portal/emails/invite.html b/portal/emails/invite.html new file mode 100644 index 0000000..3ba8281 --- /dev/null +++ b/portal/emails/invite.html @@ -0,0 +1,76 @@ +{{define "invite.html"}} + + + + + + You're invited to {{.ProjectName}} + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+

+ You've been invited to join {{.ProjectName}} +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ {{.InviterName}} from {{.InviterOrg}} has invited you to join the due diligence process for {{.ProjectName}}. +

+ + + + + + +
+ Accept Invitation +
+ + +
+

What is Dealspace?

+

+ Dealspace is a secure platform for managing M&A due diligence. All documents are encrypted and watermarked. You control what gets shared and when. +

+
+ +

+ ⏱ This invitation expires in {{if .ExpiresIn}}{{.ExpiresIn}}{{else}}7 days{{end}}. +

+
+

+ Questions? Contact support@muskepo.com +

+

+ © 2026 Dealspace · Privacy Policy · Terms of Service +

+
+
+ + +{{end}} diff --git a/portal/emails/request_forwarded.html b/portal/emails/request_forwarded.html new file mode 100644 index 0000000..317d184 --- /dev/null +++ b/portal/emails/request_forwarded.html @@ -0,0 +1,85 @@ +{{define "request_forwarded.html"}} + + + + + + {{.SenderName}} forwarded a request to you + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+

+ Request forwarded to you +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ {{.SenderName}} has forwarded a request to you for your input. +

+ + + + + + +
+

Request

+

{{.RequestTitle}}

+ + {{if .HasDueDate}} +
+

Due Date

+

{{.DueDate}}

+
+ {{end}} +
+ + + + + + +
+ View Request +
+ +

+ You can respond to this request directly in Dealspace. Your response will be routed back to {{.SenderName}} for review. +

+
+

+ Questions? Contact support@muskepo.com +

+

+ © 2026 Dealspace · Privacy Policy · Terms of Service +

+
+
+ + +{{end}} diff --git a/portal/emails/tasks_assigned.html b/portal/emails/tasks_assigned.html new file mode 100644 index 0000000..7f73ca3 --- /dev/null +++ b/portal/emails/tasks_assigned.html @@ -0,0 +1,87 @@ +{{define "tasks_assigned.html"}} + + + + + + You have {{.Count}} new task{{if gt .Count 1}}s{{end}} on {{.ProjectName}} + + + + + + +
+ + + + + + + + + + + + + + + +
+

DEALSPACE

+
+

+ You have {{.Count}} new task{{if gt .Count 1}}s{{end}} on {{.ProjectName}} +

+ +

+ Hi{{if .RecipientName}} {{.RecipientName}}{{end}}, +

+ +

+ The following request{{if gt .Count 1}}s have{{else}} has{{end}} been assigned to you: +

+ + + + {{range $i, $task := .Tasks}} + {{if lt $i 5}} + + + + + {{end}} + {{end}} +
+

{{$task.Title}}

+ {{if $task.DueDate}} +

Due: {{$task.DueDate}}{{if eq $task.Priority "high"}} · High Priority{{end}}

+ {{end}} +
+ + {{if gt .Count 5}} +

+ ...and {{sub .Count 5}} more +

+ {{end}} + + + + + + +
+ View My Tasks +
+
+

+ You're receiving this because you're assigned to requests in {{.ProjectName}}. +

+

+ © 2026 Dealspace · Manage Notifications · Unsubscribe +

+
+
+ + +{{end}} diff --git a/portal/templates/app/tasks.html b/portal/templates/app/tasks.html new file mode 100644 index 0000000..e692a34 --- /dev/null +++ b/portal/templates/app/tasks.html @@ -0,0 +1,169 @@ + + + + + + My Tasks — Dealspace + + + + + + + +
+
+ + Dealspace + + +
+
+ + +
+
+ +
+ + + + +
+ +
+

+

Here are your pending tasks.

+
+ + +
+
Loading tasks...
+
+ + + +
+
+ + + + diff --git a/portal/templates/auth/login.html b/portal/templates/auth/login.html new file mode 100644 index 0000000..bb87bc6 --- /dev/null +++ b/portal/templates/auth/login.html @@ -0,0 +1,94 @@ + + + + + + Login — Dealspace + + + + + + +
+ +
+

+ Dealspace +

+

Secure M&A deal management

+
+ + +
+

Sign in

+ + + +
+
+ + +
+
+ + +
+ +
+
+ +

© 2026 Muskepo B.V. — Amsterdam

+
+ + + + diff --git a/portal/templates/auth/setup.html b/portal/templates/auth/setup.html new file mode 100644 index 0000000..1a4bd05 --- /dev/null +++ b/portal/templates/auth/setup.html @@ -0,0 +1,100 @@ + + + + + + Setup — Dealspace + + + + + + +
+ +
+

+ Dealspace +

+

First-time setup

+
+ + +
+

Create admin account

+

This will be the first administrator account for your Dealspace instance.

+ + + + +
+
+ + +
+
+ + +
+
+ + +

Minimum 8 characters

+
+ +
+
+ +

© 2026 Muskepo B.V. — Amsterdam

+
+ + + + diff --git a/website/dpa.html b/website/dpa.html index 1e710b7..bf51c7b 100644 --- a/website/dpa.html +++ b/website/dpa.html @@ -352,6 +352,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/features.html b/website/features.html index ef1985d..486380c 100644 --- a/website/features.html +++ b/website/features.html @@ -580,6 +580,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/index.html b/website/index.html index fe2d5ee..35e9d3e 100644 --- a/website/index.html +++ b/website/index.html @@ -546,6 +546,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/pricing.html b/website/pricing.html index eab4cc8..ca022ac 100644 --- a/website/pricing.html +++ b/website/pricing.html @@ -468,6 +468,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/privacy.html b/website/privacy.html index 45e9a18..a638d25 100644 --- a/website/privacy.html +++ b/website/privacy.html @@ -291,6 +291,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/security.html b/website/security.html index c743f11..f87407e 100644 --- a/website/security.html +++ b/website/security.html @@ -96,16 +96,16 @@
    - +

    SOC 2

    +

    Self-Assessed · Type II in progress

    +
    @@ -563,6 +563,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2
  • diff --git a/website/soc2.html b/website/soc2.html new file mode 100644 index 0000000..87f4acd --- /dev/null +++ b/website/soc2.html @@ -0,0 +1,679 @@ + + + + + + SOC 2 Compliance — Dealspace + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    + Self-Assessment · Type II Audit Planned Q4 2026 +
    +

    + SOC 2 Compliance +

    +

    + Dealspace has completed a comprehensive SOC 2 Type II self-assessment. We are preparing for formal audit certification in Q4 2026. +

    +
    +
    + + +
    +
    +

    + Note: This is a self-assessment document. Formal SOC 2 Type II audit is planned for Q4 2026. +

    +
    +
    + + +
    +
    +
    +
    +
    Overview
    +

    What is SOC 2?

    +

    + SOC 2 (System and Organization Controls 2) is an auditing framework developed by the AICPA that evaluates how organizations manage customer data based on five Trust Services Criteria. +

    +

    + For M&A platforms handling confidential deal data, SOC 2 compliance demonstrates a commitment to security, availability, and data protection that investment banks and advisors require. +

    +
    +
    +

    Self-Assessment Summary

    +
    +
    + Security (CC1-CC9) +
    +
    +
    +
    + 95% +
    +
    +
    + Availability (A1) +
    +
    +
    +
    + 95% +
    +
    +
    + Confidentiality (C1) +
    +
    +
    +
    + 98% +
    +
    +
    + Processing Integrity (PI1) +
    +
    +
    +
    + 95% +
    +
    +
    + Privacy (P1-P8) +
    +
    +
    +
    + 95% +
    +
    +
    +
    +

    Assessment Date: February 28, 2026

    +
    +
    +
    +
    +
    + + +
    +
    +
    +
    Scope
    +

    What's Covered

    +

    + Our SOC 2 assessment covers all aspects of the Dealspace platform and infrastructure. +

    +
    + +
    +
    +
    + + + +
    +

    Infrastructure

    +
      +
    • • Production server (Zürich, Switzerland)
    • +
    • • Go application binary
    • +
    • • SQLite encrypted database
    • +
    • • Caddy reverse proxy
    • +
    +
    + +
    +
    + + + +
    +

    Data Types

    +
      +
    • • M&A deal documents
    • +
    • • Financial data
    • +
    • • Transaction details
    • +
    • • Participant information
    • +
    +
    + +
    +
    + + + +
    +

    User Types

    +
      +
    • • Investment bank admins/members
    • +
    • • Seller organizations
    • +
    • • Buyer organizations
    • +
    • • Observers
    • +
    +
    +
    +
    +
    + + +
    +
    +
    +
    Trust Services Criteria
    +

    The Five Pillars

    +

    + SOC 2 evaluates organizations against five Trust Services Criteria. Dealspace implements controls for all five. +

    +
    + +
    + +
    +
    +
    + + + +
    +
    +

    Security (CC1-CC9)

    +

    Protection against unauthorized access, both physical and logical.

    +
    +
    + + + + FIPS 140-3 encryption (AES-256-GCM) +
    +
    + + + + Per-project key derivation (HKDF-SHA256) +
    +
    + + + + Role-based access control (RBAC) +
    +
    + + + + MFA required for IB users +
    +
    +
    +
    +
    + + +
    +
    +
    + + + +
    +
    +

    Availability (A1)

    +

    Systems are available for operation and use as committed.

    +
    +
    + + + + 99.9% uptime SLA +
    +
    + + + + 4-hour recovery time objective +
    +
    + + + + Daily encrypted backups +
    +
    + + + + Swiss data center (Zürich) +
    +
    +
    +
    +
    + + +
    +
    +
    + + + +
    +
    +

    Confidentiality (C1)

    +

    Information designated as confidential is protected as committed.

    +
    +
    + + + + All deal data encrypted at rest +
    +
    + + + + Blind indexes for searchable encryption +
    +
    + + + + TLS 1.3 for all connections +
    +
    + + + + Dynamic document watermarking +
    +
    +
    +
    +
    + + +
    +
    +
    + + + +
    +
    +

    Processing Integrity (PI1)

    +

    System processing is complete, valid, accurate, timely, and authorized.

    +
    +
    + + + + Input validation on all data +
    +
    + + + + Parameterized SQL queries +
    +
    + + + + Optimistic locking (ETag) +
    +
    + + + + ACID transaction compliance +
    +
    +
    +
    +
    + + +
    +
    +
    + + + +
    +
    +

    Privacy (P1-P8)

    +

    Personal information is collected, used, retained, and disclosed in conformity with commitments.

    +
    +
    + + + + GDPR/FADP/CCPA compliant +
    +
    + + + + Data export on request +
    +
    + + + + No third-party tracking +
    +
    + + + + No data sales +
    +
    +
    +
    +
    +
    +
    +
    + + +
    +
    +
    +
    Controls Summary
    +

    Key Security Controls

    +
    + +
    +
    +

    Encryption

    +

    FIPS 140-3 validated AES-256-GCM with per-project keys derived via HKDF-SHA256

    +
    +
    +

    Authentication

    +

    JWT tokens with 1-hour expiry, MFA required for IB users, session management

    +
    +
    +

    Authorization

    +

    Role hierarchy (IB → Seller → Buyer → Observer), invitation-only access

    +
    +
    +

    Infrastructure

    +

    Swiss data center, UFW firewall, SSH key-only, automatic security updates

    +
    +
    +

    Audit Logging

    +

    All access logged with actor, timestamp, IP. 7-year retention for compliance

    +
    +
    +

    Backup & Recovery

    +

    Daily encrypted backups, 4-hour RTO, 24-hour RPO, tested recovery procedures

    +
    +
    +
    +
    + + +
    + +
    + + +
    +
    +
    Status
    +

    Audit Timeline

    + +
    +
    +
    +
    + + + +
    +
    +

    February 2026 — Self-Assessment Complete

    +

    Comprehensive self-assessment against all five Trust Services Criteria completed. Policy documentation created.

    +
    +
    + +
    +
    + + + +
    +
    +

    Q2 2026 — Gap Remediation

    +

    Address recommended action items including backup restore testing and external penetration test.

    +
    +
    + +
    +
    + + + +
    +
    +

    Q4 2026 — Formal SOC 2 Type II Audit

    +

    Engage third-party auditor for formal SOC 2 Type II certification.

    +
    +
    +
    +
    +
    +
    + + +
    +
    +

    Questions About Compliance?

    +

    + Contact our security team for detailed documentation or to discuss your compliance requirements. +

    + +
    +
    + + + + + + + + diff --git a/website/terms.html b/website/terms.html index f702ca7..da03a92 100644 --- a/website/terms.html +++ b/website/terms.html @@ -325,6 +325,7 @@
  • Privacy Policy
  • Terms of Service
  • DPA
  • +
  • SOC 2