clavis-vault: api/lib/cmd updates + drop _old scaffolding
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
5cf089a58e
commit
659caa20b8
File diff suppressed because it is too large
Load Diff
|
|
@ -1,882 +0,0 @@
|
|||
package api
|
||||
|
||||
// Integration tests for the Clavitor vault API.
|
||||
//
|
||||
// The test client authenticates exactly as production does:
|
||||
// - 8-byte L1 key sent as base64url Bearer on every request
|
||||
// - DB filename derived from L1[:4]: clavitor-{base64url(l1[:4])}
|
||||
// - L1 normalized to 16 bytes for AES-128 vault encryption
|
||||
//
|
||||
// Each test gets an isolated vault (temp dir + fresh DB).
|
||||
// Run: go test ./api/... -v
|
||||
|
||||
import (
	"bytes"
	"embed"
	"encoding/base64"
	"encoding/json"
	"io"
	"net/http"
	"net/http/httptest"
	"path/filepath"
	"strings"
	"testing"

	"github.com/johanj/clavitor/lib"
)
|
||||
|
||||
// --- test client ---
|
||||
|
||||
// tc is a per-test API client: an in-process httptest server wrapping
// the router, plus the credential used to authenticate every request.
type tc struct {
	t      *testing.T       // owning test, used for failure reporting
	srv    *httptest.Server // isolated API server for this vault
	bearer string           // base64url-encoded L1 key (8 bytes)
}
|
||||
|
||||
// newTestClient creates an isolated vault and test server.
|
||||
// The L1 key is deterministic so tests are reproducible.
|
||||
func newTestClient(t *testing.T) *tc {
|
||||
t.Helper()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
cfg := &lib.Config{
|
||||
Port: "0",
|
||||
DataDir: tmpDir,
|
||||
SessionTTL: 86400,
|
||||
}
|
||||
|
||||
// Fixed 8-byte L1 key for testing.
|
||||
l1Raw := []byte{0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44}
|
||||
bearer := base64.RawURLEncoding.EncodeToString(l1Raw)
|
||||
|
||||
// DB filename must match what L1Middleware derives: clavitor-{base64url(l1[:4])}
|
||||
prefix := base64.RawURLEncoding.EncodeToString(l1Raw[:4])
|
||||
dbPath := tmpDir + "/clavitor-" + prefix
|
||||
|
||||
db, err := lib.OpenDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("opendb: %v", err)
|
||||
}
|
||||
if err := lib.MigrateDB(db); err != nil {
|
||||
t.Fatalf("migrate: %v", err)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
var emptyFS embed.FS
|
||||
srv := httptest.NewServer(NewRouter(cfg, emptyFS))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
return &tc{srv: srv, bearer: bearer, t: t}
|
||||
}
|
||||
|
||||
// req sends an authenticated HTTP request.
|
||||
func (c *tc) req(method, path string, body any) *http.Response {
|
||||
c.t.Helper()
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, _ := json.Marshal(body)
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
req, _ := http.NewRequest(method, c.srv.URL+path, r)
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+c.bearer)
|
||||
resp, err := c.srv.Client().Do(req)
|
||||
if err != nil {
|
||||
c.t.Fatalf("req %s %s: %v", method, path, err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// reqNoAuth sends an unauthenticated request.
|
||||
func (c *tc) reqNoAuth(method, path string, body any) *http.Response {
|
||||
c.t.Helper()
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, _ := json.Marshal(body)
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
req, _ := http.NewRequest(method, c.srv.URL+path, r)
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
resp, err := c.srv.Client().Do(req)
|
||||
if err != nil {
|
||||
c.t.Fatalf("req %s %s: %v", method, path, err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// must asserts status code and returns parsed JSON object.
|
||||
func (c *tc) must(resp *http.Response, wantStatus int) map[string]any {
|
||||
c.t.Helper()
|
||||
defer resp.Body.Close()
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if resp.StatusCode != wantStatus {
|
||||
c.t.Fatalf("expected %d, got %d: %s", wantStatus, resp.StatusCode, body)
|
||||
}
|
||||
var out map[string]any
|
||||
json.Unmarshal(body, &out)
|
||||
return out
|
||||
}
|
||||
|
||||
// mustList asserts status code and returns parsed JSON array.
|
||||
func (c *tc) mustList(resp *http.Response, wantStatus int) []map[string]any {
|
||||
c.t.Helper()
|
||||
defer resp.Body.Close()
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if resp.StatusCode != wantStatus {
|
||||
c.t.Fatalf("expected %d, got %d: %s", wantStatus, resp.StatusCode, body)
|
||||
}
|
||||
var out []map[string]any
|
||||
json.Unmarshal(body, &out)
|
||||
return out
|
||||
}
|
||||
|
||||
// --- test data ---
|
||||
|
||||
// credentialEntry builds the request payload for a credential entry
// carrying a username/password field pair and an optional URL list.
func credentialEntry(title, username, password string, urls []string) map[string]any {
	inner := map[string]any{
		"title": title,
		"type":  "credential",
		"fields": []map[string]any{
			{"label": "username", "value": username, "kind": "text"},
			{"label": "password", "value": password, "kind": "password"},
		},
		"urls": urls,
	}
	return map[string]any{
		"title": title,
		"type":  "credential",
		"data":  inner,
	}
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Health & Ping
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestHealth(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.reqNoAuth("GET", "/health", nil), 200)
|
||||
if result["status"] != "ok" {
|
||||
t.Errorf("status = %v, want ok", result["status"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.reqNoAuth("GET", "/ping", nil), 200)
|
||||
if result["ok"] != true {
|
||||
t.Errorf("ok = %v, want true", result["ok"])
|
||||
}
|
||||
if result["node"] == nil || result["node"] == "" {
|
||||
t.Error("node should not be empty")
|
||||
}
|
||||
if result["ts"] == nil {
|
||||
t.Error("ts should be present")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// L1 Auth
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestL1Auth_valid_key(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
// Should return empty array, not an auth error
|
||||
c.mustList(c.req("GET", "/api/entries?meta=1", nil), 200)
|
||||
}
|
||||
|
||||
func TestL1Auth_bad_bearer_rejected(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
req, _ := http.NewRequest("GET", c.srv.URL+"/api/entries", nil)
|
||||
req.Header.Set("Authorization", "Bearer not-valid-base64")
|
||||
resp, _ := c.srv.Client().Do(req)
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 401 {
|
||||
t.Errorf("bad bearer should return 401, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestL1Auth_wrong_key_vault_not_found(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
// Valid base64url but points to a non-existent vault
|
||||
wrongL1 := base64.RawURLEncoding.EncodeToString([]byte{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8})
|
||||
req, _ := http.NewRequest("GET", c.srv.URL+"/api/entries", nil)
|
||||
req.Header.Set("Authorization", "Bearer "+wrongL1)
|
||||
resp, _ := c.srv.Client().Do(req)
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 404 {
|
||||
t.Errorf("wrong L1 key should return 404 (vault not found), got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Entry CRUD
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestCreateEntry(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("POST", "/api/entries", credentialEntry("GitHub", "octocat", "hunter2", []string{"https://github.com"})), 201)
|
||||
if result["entry_id"] == nil || result["entry_id"] == "" {
|
||||
t.Fatal("create should return entry_id")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateEntry_missing_title(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
resp := c.req("POST", "/api/entries", map[string]any{"type": "credential"})
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 400 {
|
||||
t.Errorf("missing title should return 400, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadEntry_roundtrip(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", credentialEntry("GitHub", "octocat", "hunter2", nil)), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
got := c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
data := got["data"].(map[string]any)
|
||||
fields := data["fields"].([]any)
|
||||
|
||||
found := map[string]string{}
|
||||
for _, f := range fields {
|
||||
fm := f.(map[string]any)
|
||||
found[fm["label"].(string)] = fm["value"].(string)
|
||||
}
|
||||
if found["username"] != "octocat" {
|
||||
t.Errorf("username = %q, want octocat", found["username"])
|
||||
}
|
||||
if found["password"] != "hunter2" {
|
||||
t.Errorf("password = %q, want hunter2", found["password"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateEntry(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", credentialEntry("Old", "user", "pass", nil)), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
updated := c.must(c.req("PUT", "/api/entries/"+id, map[string]any{
|
||||
"title": "New",
|
||||
"version": 1,
|
||||
"data": map[string]any{
|
||||
"title": "New", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "username", "value": "newuser", "kind": "text"}},
|
||||
},
|
||||
}), 200)
|
||||
|
||||
if updated["title"] != "New" {
|
||||
t.Errorf("title = %v, want New", updated["title"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateEntry_version_conflict(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", credentialEntry("Test", "u", "p", nil)), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
c.must(c.req("PUT", "/api/entries/"+id, map[string]any{
|
||||
"title": "V2", "version": 1,
|
||||
"data": map[string]any{"title": "V2", "type": "credential"},
|
||||
}), 200)
|
||||
|
||||
resp := c.req("PUT", "/api/entries/"+id, map[string]any{
|
||||
"title": "Stale", "version": 1,
|
||||
"data": map[string]any{"title": "Stale", "type": "credential"},
|
||||
})
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 409 {
|
||||
t.Errorf("stale version should return 409, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteEntry(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", credentialEntry("ToDelete", "u", "p", nil)), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
c.must(c.req("DELETE", "/api/entries/"+id, nil), 200)
|
||||
|
||||
entries := c.mustList(c.req("GET", "/api/entries?meta=1", nil), 200)
|
||||
for _, e := range entries {
|
||||
if e["entry_id"] == id {
|
||||
t.Error("deleted entry should not appear in list")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestListEntries_meta(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("One", "u", "p", nil)), 201)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("Two", "u", "p", nil)), 201)
|
||||
|
||||
entries := c.mustList(c.req("GET", "/api/entries?meta=1", nil), 200)
|
||||
if len(entries) != 2 {
|
||||
t.Errorf("expected 2 entries, got %d", len(entries))
|
||||
}
|
||||
for _, e := range entries {
|
||||
if e["data"] != nil {
|
||||
t.Error("meta mode should not include field data")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Upsert
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestUpsert_creates_when_new(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "NewEntry", "type": "credential",
|
||||
"data": map[string]any{
|
||||
"title": "NewEntry", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "password", "value": "abc", "kind": "password"}},
|
||||
},
|
||||
}), 201)
|
||||
if result["entry_id"] == nil {
|
||||
t.Fatal("upsert create should return entry_id")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpsert_updates_when_exists(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
// Create
|
||||
c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "GitHub", "type": "credential",
|
||||
"data": map[string]any{
|
||||
"title": "GitHub", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "password", "value": "old", "kind": "password"}},
|
||||
},
|
||||
}), 201)
|
||||
|
||||
// Upsert same title → update
|
||||
result := c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "GitHub", "type": "credential",
|
||||
"data": map[string]any{
|
||||
"title": "GitHub", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "password", "value": "new", "kind": "password"}},
|
||||
},
|
||||
}), 200)
|
||||
|
||||
// Verify updated
|
||||
data := result["data"].(map[string]any)
|
||||
fields := data["fields"].([]any)
|
||||
pw := fields[0].(map[string]any)["value"].(string)
|
||||
if pw != "new" {
|
||||
t.Errorf("password should be 'new' after upsert, got %q", pw)
|
||||
}
|
||||
|
||||
// Verify only one entry exists
|
||||
entries := c.mustList(c.req("GET", "/api/entries?meta=1", nil), 200)
|
||||
if len(entries) != 1 {
|
||||
t.Errorf("expected 1 entry after upsert, got %d", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpsert_case_insensitive(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "GitHub", "type": "credential",
|
||||
"data": map[string]any{"title": "GitHub", "type": "credential", "fields": []map[string]any{{"label": "user", "value": "v1", "kind": "text"}}},
|
||||
}), 201)
|
||||
|
||||
// Upsert with different case → should update, not create
|
||||
c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "github", "type": "credential",
|
||||
"data": map[string]any{"title": "github", "type": "credential", "fields": []map[string]any{{"label": "user", "value": "v2", "kind": "text"}}},
|
||||
}), 200)
|
||||
|
||||
entries := c.mustList(c.req("GET", "/api/entries?meta=1", nil), 200)
|
||||
if len(entries) != 1 {
|
||||
t.Errorf("case-insensitive upsert should match existing, got %d entries", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Entry Types — note, card, identity
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestUpsert_note(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "Meeting Notes", "type": "note",
|
||||
"data": map[string]any{
|
||||
"title": "Meeting Notes", "type": "note",
|
||||
"fields": []map[string]any{{"label": "Content", "value": "Discussed Q3 roadmap", "kind": "text"}},
|
||||
"notes": "Follow up next week",
|
||||
},
|
||||
}), 201)
|
||||
id := result["entry_id"].(string)
|
||||
|
||||
got := c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
data := got["data"].(map[string]any)
|
||||
if data["notes"] != "Follow up next week" {
|
||||
t.Errorf("notes = %v", data["notes"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpsert_card(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "Amex Platinum", "type": "card",
|
||||
"data": map[string]any{
|
||||
"title": "Amex Platinum", "type": "card",
|
||||
"fields": []map[string]any{
|
||||
{"label": "Cardholder", "value": "Johan Jongsma", "kind": "text"},
|
||||
{"label": "Number", "value": "ENC_BLOB_378282246310005", "kind": "text", "l2": true},
|
||||
{"label": "CVV", "value": "ENC_BLOB_1234", "kind": "text", "l2": true},
|
||||
{"label": "Expiry", "value": "09/28", "kind": "text"},
|
||||
},
|
||||
},
|
||||
}), 201)
|
||||
id := result["entry_id"].(string)
|
||||
|
||||
got := c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
data := got["data"].(map[string]any)
|
||||
fields := data["fields"].([]any)
|
||||
if len(fields) != 4 {
|
||||
t.Fatalf("card should have 4 fields, got %d", len(fields))
|
||||
}
|
||||
// L2 blobs should survive roundtrip
|
||||
number := fields[1].(map[string]any)["value"].(string)
|
||||
if number != "ENC_BLOB_378282246310005" {
|
||||
t.Errorf("card number blob changed: %q", number)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpsert_identity(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("PUT", "/api/entries", map[string]any{
|
||||
"title": "Home Address", "type": "identity",
|
||||
"data": map[string]any{
|
||||
"title": "Home Address", "type": "identity",
|
||||
"fields": []map[string]any{
|
||||
{"label": "Street", "value": "123 Main St", "kind": "text"},
|
||||
{"label": "City", "value": "Springfield", "kind": "text"},
|
||||
{"label": "State", "value": "IL", "kind": "text"},
|
||||
{"label": "ZIP", "value": "62704", "kind": "text"},
|
||||
{"label": "Country", "value": "US", "kind": "text"},
|
||||
},
|
||||
},
|
||||
}), 201)
|
||||
id := result["entry_id"].(string)
|
||||
|
||||
got := c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
data := got["data"].(map[string]any)
|
||||
fields := data["fields"].([]any)
|
||||
if len(fields) != 5 {
|
||||
t.Fatalf("identity should have 5 fields, got %d", len(fields))
|
||||
}
|
||||
city := fields[1].(map[string]any)["value"].(string)
|
||||
if city != "Springfield" {
|
||||
t.Errorf("city = %q", city)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Search
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestSearch(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("GitHub", "u", "p", nil)), 201)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("GitLab", "u", "p", nil)), 201)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("AWS", "u", "p", nil)), 201)
|
||||
|
||||
entries := c.mustList(c.req("GET", "/api/search?q=Git", nil), 200)
|
||||
if len(entries) != 2 {
|
||||
t.Errorf("search for 'Git' should return 2, got %d", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSearch_no_query(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
resp := c.req("GET", "/api/search", nil)
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 400 {
|
||||
t.Errorf("missing query should return 400, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TOTP
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestTOTP_valid_code(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
entry := map[string]any{
|
||||
"title": "2FA Test", "type": "credential",
|
||||
"data": map[string]any{
|
||||
"title": "2FA Test", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "totp", "value": "JBSWY3DPEHPK3PXP", "kind": "totp"}},
|
||||
},
|
||||
}
|
||||
created := c.must(c.req("POST", "/api/entries", entry), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
result := c.must(c.req("GET", "/api/ext/totp/"+id, nil), 200)
|
||||
code, _ := result["code"].(string)
|
||||
if len(code) != 6 {
|
||||
t.Errorf("TOTP code = %q, want 6 digits", code)
|
||||
}
|
||||
expiresIn, _ := result["expires_in"].(float64)
|
||||
if expiresIn <= 0 || expiresIn > 30 {
|
||||
t.Errorf("expires_in = %v, want 1-30", expiresIn)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTOTP_L2_returns_locked(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
entry := map[string]any{
|
||||
"title": "L2 TOTP", "type": "credential",
|
||||
"data": map[string]any{
|
||||
"title": "L2 TOTP", "type": "credential",
|
||||
"fields": []map[string]any{{"label": "totp", "value": "JBSWY3DPEHPK3PXP", "kind": "totp", "l2": true}},
|
||||
},
|
||||
}
|
||||
created := c.must(c.req("POST", "/api/entries", entry), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
result := c.must(c.req("GET", "/api/ext/totp/"+id, nil), 200)
|
||||
if result["l2"] != true {
|
||||
t.Error("L2 TOTP should return l2:true")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// URL Match
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestURLMatch(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("GitHub", "u", "p", []string{"https://github.com"})), 201)
|
||||
|
||||
matches := c.mustList(c.req("GET", "/api/ext/match?url=https://github.com/login", nil), 200)
|
||||
if len(matches) == 0 {
|
||||
t.Error("should match github.com for github.com/login")
|
||||
}
|
||||
}
|
||||
|
||||
func TestURLMatch_no_match(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/entries", credentialEntry("GitHub", "u", "p", []string{"https://github.com"})), 201)
|
||||
|
||||
matches := c.mustList(c.req("GET", "/api/ext/match?url=https://example.com", nil), 200)
|
||||
if len(matches) != 0 {
|
||||
t.Errorf("should not match, got %d", len(matches))
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Password Generator
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestPasswordGenerator(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("GET", "/api/generate?length=24", nil), 200)
|
||||
pw, _ := result["password"].(string)
|
||||
if len(pw) != 24 {
|
||||
t.Errorf("password length = %d, want 24", len(pw))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPasswordGenerator_passphrase(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.req("GET", "/api/generate?words=4", nil), 200)
|
||||
pw, _ := result["password"].(string)
|
||||
words := strings.Split(pw, "-")
|
||||
if len(words) != 4 {
|
||||
t.Errorf("passphrase should have 4 words, got %d: %q", len(words), pw)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Audit Log
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestAuditLog(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", credentialEntry("Audited", "u", "p", nil)), 201)
|
||||
id := created["entry_id"].(string)
|
||||
c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
|
||||
events := c.mustList(c.req("GET", "/api/audit", nil), 200)
|
||||
if len(events) < 2 {
|
||||
t.Errorf("expected at least 2 events (create + read), got %d", len(events))
|
||||
}
|
||||
actions := map[string]bool{}
|
||||
for _, e := range events {
|
||||
if a, ok := e["action"].(string); ok {
|
||||
actions[a] = true
|
||||
}
|
||||
}
|
||||
if !actions["create"] {
|
||||
t.Error("missing 'create' in audit log")
|
||||
}
|
||||
if !actions["read"] {
|
||||
t.Error("missing 'read' in audit log")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// WebAuthn Auth Flow
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
func TestAuthStatus_fresh(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
result := c.must(c.reqNoAuth("GET", "/api/auth/status", nil), 200)
|
||||
if result["state"] != "fresh" {
|
||||
t.Errorf("state = %v, want fresh", result["state"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuthRegisterBegin_fresh(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
resp := c.reqNoAuth("POST", "/api/auth/register/begin", map[string]any{})
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
b, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("expected 200, got %d: %s", resp.StatusCode, b)
|
||||
}
|
||||
var result map[string]any
|
||||
json.NewDecoder(resp.Body).Decode(&result)
|
||||
pk := result["publicKey"].(map[string]any)
|
||||
if pk["challenge"] == nil {
|
||||
t.Fatal("response should contain a challenge")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tier Isolation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// TestTierIsolation verifies that L2/L3 encrypted blobs survive the L1
|
||||
// envelope encrypt/decrypt roundtrip intact. The server packs all fields
|
||||
// into a single AES-GCM envelope (L1). L2/L3 field values are opaque
|
||||
// ciphertext — the server stores them, never inspects them.
|
||||
func TestTierIsolation(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
|
||||
l2Blob := "AQIDBAUGB5iL2EncryptedBlob+test=="
|
||||
l3Blob := "AQIDBAUGB5iL3EncryptedBlob+test=="
|
||||
|
||||
created := c.must(c.req("POST", "/api/entries", map[string]any{
|
||||
"type": "credential", "title": "TierTest",
|
||||
"data": map[string]any{
|
||||
"title": "TierTest", "type": "credential",
|
||||
"fields": []map[string]any{
|
||||
{"label": "Username", "value": "testuser", "kind": "text"},
|
||||
{"label": "Password", "value": l2Blob, "kind": "password", "tier": 2},
|
||||
{"label": "SSN", "value": l3Blob, "kind": "text", "tier": 3, "l2": true},
|
||||
},
|
||||
},
|
||||
}), 201)
|
||||
id := created["entry_id"].(string)
|
||||
|
||||
got := c.must(c.req("GET", "/api/entries/"+id, nil), 200)
|
||||
data := got["data"].(map[string]any)
|
||||
fields := data["fields"].([]any)
|
||||
|
||||
found := map[string]string{}
|
||||
for _, raw := range fields {
|
||||
f := raw.(map[string]any)
|
||||
found[f["label"].(string)], _ = f["value"].(string)
|
||||
}
|
||||
|
||||
if found["Username"] != "testuser" {
|
||||
t.Errorf("L1 Username = %q, want testuser", found["Username"])
|
||||
}
|
||||
if found["Password"] != l2Blob {
|
||||
t.Errorf("L2 Password blob changed: %q", found["Password"])
|
||||
}
|
||||
if found["SSN"] != l3Blob {
|
||||
t.Errorf("L3 SSN blob changed: %q", found["SSN"])
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Scoped Access
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// reqAgent sends an authenticated request using a cvt_ agent token.
|
||||
func (c *tc) reqAgent(token, method, path string, body any) *http.Response {
|
||||
c.t.Helper()
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, _ := json.Marshal(body)
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
req, _ := http.NewRequest(method, c.srv.URL+path, r)
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
resp, err := c.srv.Client().Do(req)
|
||||
if err != nil {
|
||||
c.t.Fatalf("req %s %s: %v", method, path, err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// createAgent creates an agent via the owner API and returns the raw token.
|
||||
func (c *tc) createAgent(name string, allAccess bool) string {
|
||||
c.t.Helper()
|
||||
resp := c.req("POST", "/api/agents", map[string]any{
|
||||
"name": name,
|
||||
"all_access": allAccess,
|
||||
})
|
||||
result := c.must(resp, 201)
|
||||
token, ok := result["token"].(string)
|
||||
if !ok || token == "" {
|
||||
c.t.Fatalf("createAgent: no token in response: %v", result)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
func TestScopedAccess_agent_sees_only_scoped_entries(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/auth/setup", nil), 200)
|
||||
|
||||
// Create agent (gets scope "0001" since it's agent id=1)
|
||||
token := c.createAgent("Claude Code", false)
|
||||
|
||||
// Create two entries: one scoped to agent 0001, one owner-only
|
||||
c.must(c.req("POST", "/api/entries", map[string]any{
|
||||
"title": "Scoped Entry",
|
||||
"type": "credential",
|
||||
"data": map[string]any{"title": "Scoped Entry", "type": "credential"},
|
||||
}), 201)
|
||||
c.must(c.req("POST", "/api/entries", map[string]any{
|
||||
"title": "Owner Only",
|
||||
"type": "credential",
|
||||
"data": map[string]any{"title": "Owner Only", "type": "credential"},
|
||||
}), 201)
|
||||
|
||||
// Get all entries via owner — should see both
|
||||
ownerEntries := c.mustList(c.req("GET", "/api/entries", nil), 200)
|
||||
if len(ownerEntries) != 2 {
|
||||
t.Fatalf("owner should see 2 entries, got %d", len(ownerEntries))
|
||||
}
|
||||
|
||||
// Find the scoped entry by title and assign scope "0001"
|
||||
var scopedEntryID string
|
||||
for _, e := range ownerEntries {
|
||||
if e["title"] == "Scoped Entry" {
|
||||
scopedEntryID = e["entry_id"].(string)
|
||||
break
|
||||
}
|
||||
}
|
||||
c.must(c.req("PUT", "/api/entries/"+scopedEntryID+"/scopes", map[string]any{
|
||||
"scopes": "0001",
|
||||
}), 200)
|
||||
|
||||
// Agent should see only the scoped entry
|
||||
agentEntries := c.mustList(c.reqAgent(token, "GET", "/api/entries", nil), 200)
|
||||
if len(agentEntries) != 1 {
|
||||
t.Fatalf("agent should see 1 entry, got %d", len(agentEntries))
|
||||
}
|
||||
if agentEntries[0]["title"] != "Scoped Entry" {
|
||||
t.Errorf("agent saw wrong entry: %s", agentEntries[0]["title"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestScopedAccess_agent_forbidden_on_unscoped_entry(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/auth/setup", nil), 200)
|
||||
|
||||
token := c.createAgent("Test Agent", false)
|
||||
|
||||
// Create owner-only entry (no scopes)
|
||||
resp := c.req("POST", "/api/entries", map[string]any{
|
||||
"title": "Secret",
|
||||
"type": "note",
|
||||
"data": map[string]any{"title": "Secret", "type": "note"},
|
||||
})
|
||||
entry := c.must(resp, 201)
|
||||
entryID := entry["entry_id"].(string)
|
||||
|
||||
// Agent tries to GET it — should be 403
|
||||
agentResp := c.reqAgent(token, "GET", "/api/entries/"+entryID, nil)
|
||||
if agentResp.StatusCode != 403 {
|
||||
t.Fatalf("expected 403, got %d", agentResp.StatusCode)
|
||||
}
|
||||
agentResp.Body.Close()
|
||||
}
|
||||
|
||||
func TestScopedAccess_all_access_agent_sees_everything(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/auth/setup", nil), 200)
|
||||
|
||||
token := c.createAgent("Full Access Agent", true)
|
||||
|
||||
// Create owner-only entry
|
||||
c.must(c.req("POST", "/api/entries", map[string]any{
|
||||
"title": "Owner Secret",
|
||||
"type": "note",
|
||||
"data": map[string]any{"title": "Owner Secret", "type": "note"},
|
||||
}), 201)
|
||||
|
||||
// all_access agent should see it
|
||||
agentEntries := c.mustList(c.reqAgent(token, "GET", "/api/entries", nil), 200)
|
||||
if len(agentEntries) != 1 {
|
||||
t.Fatalf("all_access agent should see 1 entry, got %d", len(agentEntries))
|
||||
}
|
||||
}
|
||||
|
||||
func TestScopedAccess_agent_cannot_manage_agents(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/auth/setup", nil), 200)
|
||||
|
||||
token := c.createAgent("Evil Agent", false)
|
||||
|
||||
// Agent tries to create another agent — should be 403
|
||||
resp := c.reqAgent(token, "POST", "/api/agents", map[string]any{
|
||||
"name": "Backdoor",
|
||||
})
|
||||
if resp.StatusCode != 403 {
|
||||
t.Fatalf("agent should not create agents: got %d", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
// Agent tries to list agents — should be 403
|
||||
resp = c.reqAgent(token, "GET", "/api/agents", nil)
|
||||
if resp.StatusCode != 403 {
|
||||
t.Fatalf("agent should not list agents: got %d", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
func TestScopedAccess_agent_cannot_modify_scopes(t *testing.T) {
|
||||
c := newTestClient(t)
|
||||
c.must(c.req("POST", "/api/auth/setup", nil), 200)
|
||||
|
||||
token := c.createAgent("Scope Hijacker", false)
|
||||
|
||||
// Create an entry
|
||||
entry := c.must(c.req("POST", "/api/entries", map[string]any{
|
||||
"title": "Target",
|
||||
"type": "credential",
|
||||
"data": map[string]any{"title": "Target", "type": "credential"},
|
||||
}), 201)
|
||||
entryID := entry["entry_id"].(string)
|
||||
|
||||
// Agent tries to modify scopes — should be 403
|
||||
resp := c.reqAgent(token, "PUT", "/api/entries/"+entryID+"/scopes", map[string]any{
|
||||
"scopes": "0001",
|
||||
})
|
||||
if resp.StatusCode != 403 {
|
||||
t.Fatalf("agent should not modify scopes: got %d", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
|
@ -1,390 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/johanj/clavitor/lib"
|
||||
)
|
||||
|
||||
// base64Decode handles both standard and url-safe base64 (with or without padding).
|
||||
// base64Decode decodes s, accepting both the url-safe and the standard
// base64 alphabet; any trailing '=' padding is stripped first, so padded
// and unpadded inputs both work.
func base64Decode(s string) ([]byte, error) {
	trimmed := strings.TrimRight(s, "=")
	// Prefer the url-safe alphabet; fall back to standard on failure.
	if decoded, err := base64.RawURLEncoding.DecodeString(trimmed); err == nil {
		return decoded, nil
	}
	return base64.RawStdEncoding.DecodeString(trimmed)
}
|
||||
|
||||
// base64UrlEncode encodes bytes as base64url without padding.
|
||||
// base64UrlEncode renders raw as unpadded base64url text (the encoding
// used for vault DB filename prefixes and bearer values).
func base64UrlEncode(raw []byte) string {
	return base64.RawURLEncoding.EncodeToString(raw)
}
|
||||
|
||||
// contextKey is a private key type for request-context values, so keys
// set here cannot collide with keys set by other packages.
type contextKey string

// Context keys populated by L1Middleware for downstream handlers.
const (
	ctxActor    contextKey = "actor"     // actor type string (lib.ActorWeb / lib.ActorAgent)
	ctxSession  contextKey = "session"   // *lib.Session, when a session exists
	ctxAgent    contextKey = "agent"     // *lib.Agent for cvt_ token requests; absent for owners
	ctxDB       contextKey = "db"        // *lib.DB opened for this request
	ctxVaultKey contextKey = "vault_key" // []byte vault encryption key derived from L1
	ctxVaultID  contextKey = "vault_id"  // int64 vault ID (0 in self-hosted mode)
)
|
||||
|
||||
// ActorFromContext returns the actor type from request context.
|
||||
func ActorFromContext(ctx context.Context) string {
|
||||
v, ok := ctx.Value(ctxActor).(string)
|
||||
if !ok {
|
||||
return lib.ActorWeb
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// SessionFromContext returns the session from request context.
|
||||
func SessionFromContext(ctx context.Context) *lib.Session {
|
||||
v, _ := ctx.Value(ctxSession).(*lib.Session)
|
||||
return v
|
||||
}
|
||||
|
||||
// AgentFromContext returns the agent from request context (nil if not an agent request).
|
||||
func AgentFromContext(ctx context.Context) *lib.Agent {
|
||||
v, _ := ctx.Value(ctxAgent).(*lib.Agent)
|
||||
return v
|
||||
}
|
||||
|
||||
// DBFromContext returns the vault DB from request context (nil in self-hosted mode).
|
||||
func DBFromContext(ctx context.Context) *lib.DB {
|
||||
v, _ := ctx.Value(ctxDB).(*lib.DB)
|
||||
return v
|
||||
}
|
||||
|
||||
// VaultKeyFromContext returns the derived vault key from request context (nil in self-hosted mode).
|
||||
func VaultKeyFromContext(ctx context.Context) []byte {
|
||||
v, _ := ctx.Value(ctxVaultKey).([]byte)
|
||||
return v
|
||||
}
|
||||
|
||||
// VaultIDFromContext returns the vault ID from request context (0 in self-hosted mode).
|
||||
func VaultIDFromContext(ctx context.Context) int64 {
|
||||
v, _ := ctx.Value(ctxVaultID).(int64)
|
||||
return v
|
||||
}
|
||||
|
||||
// L1Middleware extracts L1 from Bearer token and opens the vault DB.
|
||||
// Fully stateless: L1 arrives with every request, is used, then forgotten.
|
||||
// No sessions, no stored keys. The server has zero keys of its own.
|
||||
//
|
||||
// Self-hosted mode: finds vault DB by globbing clavitor-* files.
|
||||
// Hosted mode: finds vault DB by base64url(L1[0:4]) → filename.
|
||||
func L1Middleware(dataDir string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			auth := r.Header.Get("Authorization")

			// No auth = unauthenticated request (registration, login begin, etc.)
			if auth == "" || !strings.HasPrefix(auth, "Bearer ") {
				// Try to open vault DB without L1 (for unauthenticated endpoints).
				// NOTE(review): only the first glob match is used — assumes at most
				// one vault file exists in self-hosted mode; confirm.
				matches, _ := filepath.Glob(filepath.Join(dataDir, "clavitor-*"))
				if len(matches) > 0 {
					db, err := lib.OpenDB(matches[0])
					if err == nil {
						// Closed when this handler returns; DB lives only per-request.
						defer db.Close()
						ctx := context.WithValue(r.Context(), ctxDB, db)
						next.ServeHTTP(w, r.WithContext(ctx))
						return
					}
				}
				// Also try legacy .db files for migration
				matches, _ = filepath.Glob(filepath.Join(dataDir, "????????.db"))
				if len(matches) > 0 {
					db, err := lib.OpenDB(matches[0])
					if err == nil {
						defer db.Close()
						ctx := context.WithValue(r.Context(), ctxDB, db)
						next.ServeHTTP(w, r.WithContext(ctx))
						return
					}
				}
				// No vault found at all: pass through with no DB in context.
				next.ServeHTTP(w, r)
				return
			}

			bearerVal := strings.TrimPrefix(auth, "Bearer ")

			var l1Raw []byte
			var agent *lib.Agent

			if strings.HasPrefix(bearerVal, "cvt_") {
				// --- Agent token: cvt_ prefix ---
				// Extract L1 and look up agent by token hash.
				var hash string
				var err error
				l1Raw, hash, err = lib.ParseToken(bearerVal)
				if err != nil {
					ErrorResponse(w, http.StatusUnauthorized, "invalid_token", "Invalid agent token")
					return
				}

				// Open vault DB from L1. The DB filename is derived from the first
				// 4 bytes of L1, base64url-encoded.
				// NOTE(review): assumes ParseToken always returns >= 4 bytes of L1;
				// l1Raw[:4] would panic otherwise — confirm in lib.ParseToken.
				l1Key := lib.NormalizeKey(l1Raw)
				vaultPrefix := base64UrlEncode(l1Raw[:4])
				dbPath := filepath.Join(dataDir, "clavitor-"+vaultPrefix)

				db, err := lib.OpenDB(dbPath)
				if err != nil {
					ErrorResponse(w, http.StatusNotFound, "vault_not_found", "Vault not found")
					return
				}
				defer db.Close()

				// Look up agent by the token's hash; nil means unknown or revoked.
				agent, err = lib.AgentGetByToken(db, hash)
				if err != nil {
					ErrorResponse(w, http.StatusInternalServerError, "agent_error", "Agent lookup failed")
					return
				}
				if agent == nil {
					ErrorResponse(w, http.StatusUnauthorized, "unknown_token", "Invalid or revoked token")
					return
				}

				// Attach DB, vault key, actor type, and agent for downstream handlers.
				ctx := context.WithValue(r.Context(), ctxDB, db)
				ctx = context.WithValue(ctx, ctxVaultKey, l1Key)
				ctx = context.WithValue(ctx, ctxActor, lib.ActorAgent)
				ctx = context.WithValue(ctx, ctxAgent, agent)
				next.ServeHTTP(w, r.WithContext(ctx))

			} else {
				// --- Legacy L1 bearer (web UI / extension) ---
				// 8 bytes base64url = vault owner, full access, no agent.
				l1Raw, err := base64Decode(bearerVal)
				if err != nil || len(l1Raw) != 8 {
					ErrorResponse(w, http.StatusUnauthorized, "invalid_l1", "Invalid L1 key in Bearer")
					return
				}

				// Normalize the 8-byte L1 to the vault encryption key and derive
				// the DB filename from L1's first 4 bytes.
				l1Key := lib.NormalizeKey(l1Raw)
				vaultPrefix := base64UrlEncode(l1Raw[:4])
				dbPath := filepath.Join(dataDir, "clavitor-"+vaultPrefix)

				var db *lib.DB
				if _, err := os.Stat(dbPath); err == nil {
					db, err = lib.OpenDB(dbPath)
					if err != nil {
						log.Printf("vault open error (%s): %v", dbPath, err)
						ErrorResponse(w, http.StatusInternalServerError, "db_error", "Failed to open vault")
						return
					}
				}
				if db == nil {
					// Stat failed (file absent) — a wrong L1 simply maps to no vault.
					ErrorResponse(w, http.StatusNotFound, "vault_not_found", "Vault not found")
					return
				}
				defer db.Close()

				ctx := context.WithValue(r.Context(), ctxDB, db)
				ctx = context.WithValue(ctx, ctxVaultKey, l1Key)
				ctx = context.WithValue(ctx, ctxActor, lib.ActorWeb)
				// No agent in context = vault owner (full access)

				next.ServeHTTP(w, r.WithContext(ctx))
			}
		})
	}
}
|
||||
|
||||
// IsAgentRequest returns true if the request was made with a cvt_ agent token.
|
||||
func IsAgentRequest(r *http.Request) bool {
|
||||
return AgentFromContext(r.Context()) != nil
|
||||
}
|
||||
|
||||
// LoggingMiddleware logs HTTP requests.
|
||||
func LoggingMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
wrapped := &statusWriter{ResponseWriter: w, status: 200}
|
||||
next.ServeHTTP(wrapped, r)
|
||||
log.Printf("%s %s %d %s", r.Method, r.URL.Path, wrapped.status, time.Since(start))
|
||||
})
|
||||
}
|
||||
|
||||
// statusWriter wraps an http.ResponseWriter to record the status code
// written by downstream handlers (consumed by LoggingMiddleware).
type statusWriter struct {
	http.ResponseWriter
	status int // last code passed to WriteHeader; callers initialize it to 200
}
|
||||
|
||||
// WriteHeader records code, then delegates to the wrapped ResponseWriter.
func (w *statusWriter) WriteHeader(code int) {
	w.status = code
	w.ResponseWriter.WriteHeader(code)
}
|
||||
|
||||
// RateLimitMiddleware implements per-IP rate limiting.
|
||||
func RateLimitMiddleware(requestsPerMinute int) func(http.Handler) http.Handler {
|
||||
var mu sync.Mutex
|
||||
clients := make(map[string]*rateLimitEntry)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Minute)
|
||||
mu.Lock()
|
||||
now := time.Now()
|
||||
for ip, entry := range clients {
|
||||
if now.Sub(entry.windowStart) > time.Minute {
|
||||
delete(clients, ip)
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ip := realIP(r)
|
||||
|
||||
mu.Lock()
|
||||
entry, exists := clients[ip]
|
||||
now := time.Now()
|
||||
if !exists || now.Sub(entry.windowStart) > time.Minute {
|
||||
entry = &rateLimitEntry{windowStart: now, count: 0}
|
||||
clients[ip] = entry
|
||||
}
|
||||
entry.count++
|
||||
count := entry.count
|
||||
mu.Unlock()
|
||||
|
||||
if count > requestsPerMinute {
|
||||
ErrorResponse(w, http.StatusTooManyRequests, "rate_limited", "Too many requests")
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// rateLimitEntry is one client's fixed-window counter state for
// RateLimitMiddleware.
type rateLimitEntry struct {
	windowStart time.Time // when the current one-minute window began
	count       int       // requests seen in the current window
}
|
||||
|
||||
// CORSMiddleware handles CORS headers.
|
||||
func CORSMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
origin := r.Header.Get("Origin")
|
||||
|
||||
// Allow localhost and 127.0.0.1 for development
|
||||
if origin != "" && (strings.Contains(origin, "localhost") || strings.Contains(origin, "127.0.0.1")) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", origin)
|
||||
w.Header().Set("Vary", "Origin")
|
||||
}
|
||||
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
|
||||
w.Header().Set("Access-Control-Max-Age", "86400")
|
||||
|
||||
if r.Method == "OPTIONS" {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// SecurityHeadersMiddleware adds security headers to all responses.
|
||||
func SecurityHeadersMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("X-Frame-Options", "DENY")
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
w.Header().Set("X-XSS-Protection", "1; mode=block")
|
||||
w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
|
||||
// CSP allowing localhost and 127.0.0.1 for development
|
||||
w.Header().Set("Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.tailwindcss.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; font-src 'self' data: https://fonts.gstatic.com; img-src 'self' data: https:; connect-src 'self' localhost 127.0.0.1")
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// ErrorResponse sends a standard JSON error response.
|
||||
func ErrorResponse(w http.ResponseWriter, status int, code, message string) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
json.NewEncoder(w).Encode(map[string]string{
|
||||
"error": message,
|
||||
"code": code,
|
||||
})
|
||||
}
|
||||
|
||||
// JSONResponse sends a standard JSON success response.
|
||||
func JSONResponse(w http.ResponseWriter, status int, data any) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
json.NewEncoder(w).Encode(data)
|
||||
}
|
||||
|
||||
// tarpitHandler holds unrecognized requests for 30 seconds.
|
||||
// Drips one byte per second to keep the connection alive and waste
|
||||
// scanner resources. Capped at 1000 concurrent tarpit slots —
|
||||
// beyond that, connections are dropped immediately.
|
||||
var (
	// tarpitSem caps concurrent tarpitted connections at 1000. A slot is
	// taken for the duration of tarpitHandler; when full, further
	// unmatched requests are dropped immediately with no response.
	tarpitSem = make(chan struct{}, 1000)
)
|
||||
|
||||
// tarpitHandler holds unrecognized requests open for ~30 seconds,
// dripping one space per second to keep the connection alive and waste
// scanner resources. If all tarpit slots are occupied, the connection
// is hijacked and closed without any response.
func tarpitHandler(w http.ResponseWriter, r *http.Request) {
	select {
	case tarpitSem <- struct{}{}:
		defer func() { <-tarpitSem }()
	default:
		// Tarpit full — drop immediately, no response
		if hj, ok := w.(http.Hijacker); ok {
			conn, _, err := hj.Hijack()
			if err == nil {
				conn.Close()
			}
		}
		return
	}

	log.Printf("tarpit: %s %s from %s", r.Method, r.URL.Path, realIP(r))

	// Chunked response: drip one space per second for 30s
	w.Header().Set("Content-Type", "text/plain")
	w.WriteHeader(200)
	// Flush after each byte when the writer supports it, so the drip is
	// actually observed by the client rather than buffered.
	flusher, canFlush := w.(http.Flusher)
	for i := 0; i < 30; i++ {
		_, err := w.Write([]byte(" "))
		if err != nil {
			return // client gave up
		}
		if canFlush {
			flusher.Flush()
		}
		time.Sleep(time.Second)
	}
}
|
||||
|
||||
func realIP(r *http.Request) string {
|
||||
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
|
||||
parts := strings.SplitN(xff, ",", 2)
|
||||
return strings.TrimSpace(parts[0])
|
||||
}
|
||||
if xri := r.Header.Get("X-Real-IP"); xri != "" {
|
||||
return xri
|
||||
}
|
||||
addr := r.RemoteAddr
|
||||
if idx := strings.LastIndex(addr, ":"); idx != -1 {
|
||||
return addr[:idx]
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
|
@ -1,170 +0,0 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/johanj/clavitor/lib"
|
||||
)
|
||||
|
||||
// NewRouter creates the main router with all routes registered.
|
||||
func NewRouter(cfg *lib.Config, webFS embed.FS) *chi.Mux {
	r := chi.NewRouter()
	h := NewHandlers(cfg)

	// Global middleware — order matters: logging wraps everything, CORS and
	// security headers are stamped before auth, and L1Middleware runs last
	// so rate limiting applies even to unauthenticated requests.
	r.Use(LoggingMiddleware)
	r.Use(CORSMiddleware)
	r.Use(SecurityHeadersMiddleware)
	r.Use(RateLimitMiddleware(120)) // 120 req/min per IP
	r.Use(L1Middleware(cfg.DataDir)) // stateless: extract L1 from Bearer, open DB, forget

	// Health check (unauthenticated — no Bearer needed)
	r.Get("/health", h.Health)

	// Ping — minimal latency probe for looking glass (no DB, no auth)
	node, _ := os.Hostname()
	r.Get("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		body := fmt.Sprintf(`{"ok":true,"node":"%s","ts":%d}`, node, time.Now().Unix())
		w.Write([]byte(body))
	})

	// Auth endpoints (unauthenticated — no Bearer, DB found by glob)
	r.Get("/api/auth/status", h.AuthStatus)
	r.Post("/api/auth/register/begin", h.AuthRegisterBegin)
	r.Post("/api/auth/register/complete", h.AuthRegisterComplete)
	r.Post("/api/auth/login/begin", h.AuthLoginBegin)
	r.Post("/api/auth/login/complete", h.AuthLoginComplete)

	// Legacy setup (only works when no credentials exist — for tests)
	r.Post("/api/auth/setup", h.Setup)

	// API routes (authenticated — L1 in Bearer, already validated by L1Middleware)
	r.Route("/api", func(r chi.Router) {
		mountAPIRoutes(r, h)
	})

	// --- Vault App UI at /app/* ---
	// Served from the embedded web/ directory; silently absent if the
	// embed subtree cannot be opened.
	appRoot, err := fs.Sub(webFS, "web")
	if err == nil {
		appServer := http.FileServer(http.FS(appRoot))
		r.Get("/app", func(w http.ResponseWriter, r *http.Request) {
			http.Redirect(w, r, "/app/", http.StatusMovedPermanently)
		})
		r.Handle("/app/*", http.StripPrefix("/app", appServer))
	}

	// --- Root-level: minimal, disclose nothing ---
	// Legitimate browser/crawler requests get a fast, empty response.
	// Everything else hits the tarpit (30s slow drain).

	favicon, _ := fs.ReadFile(webFS, "web/favicon.svg")
	serveFavicon := func(w http.ResponseWriter, r *http.Request) {
		if favicon != nil {
			w.Header().Set("Content-Type", "image/svg+xml")
			w.Header().Set("Cache-Control", "public, max-age=86400")
			w.Write(favicon)
		} else {
			w.WriteHeader(204)
		}
	}
	nothing := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(204) }
	disallow := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		w.Write([]byte("User-agent: *\nDisallow: /\n"))
	}

	// Well-known crawler/scanner paths get fast empty responses so they
	// never reach the tarpit.
	r.Get("/", nothing)
	r.Get("/favicon.ico", serveFavicon)
	r.Get("/favicon.svg", serveFavicon)
	r.Get("/robots.txt", disallow)
	r.Get("/sitemap.xml", nothing)
	r.Get("/sitemap.xml.gz", nothing)
	r.Get("/sitemap-index.xml", nothing)
	r.Get("/ads.txt", nothing)
	r.Get("/app-ads.txt", nothing)
	r.Get("/manifest.json", nothing)
	r.Get("/browserconfig.xml", nothing)
	r.Get("/crossdomain.xml", nothing)
	r.Get("/humans.txt", nothing)
	r.Get("/security.txt", nothing)
	r.Get("/apple-touch-icon.png", nothing)
	r.Get("/apple-touch-icon-precomposed.png", nothing)
	r.Get("/.well-known/security.txt", nothing)
	r.Get("/.well-known/acme-challenge/*", nothing)
	r.Get("/.well-known/change-password", nothing)
	r.Get("/.well-known/openid-configuration", nothing)
	r.Get("/.well-known/webfinger", nothing)
	r.Get("/.well-known/assetlinks.json", nothing)
	r.Get("/.well-known/apple-app-site-association", nothing)
	r.Get("/.well-known/mta-sts.txt", nothing)
	r.Get("/.well-known/nodeinfo", nothing)

	// Tarpit: everything not registered above.
	// Hold the connection for 30s, drip slowly, waste scanner resources.
	r.NotFound(tarpitHandler)
	r.MethodNotAllowed(tarpitHandler)

	return r
}
|
||||
|
||||
// mountAPIRoutes registers the authenticated API handlers on the given router.
|
||||
// mountAPIRoutes registers the authenticated API handlers on the given
// router. All paths here are relative to /api; L1Middleware has already
// validated the Bearer and attached the vault DB before these run.
func mountAPIRoutes(r chi.Router, h *Handlers) {
	// Vault info (for Tokens page config snippets)
	r.Get("/vault-info", h.VaultInfo)

	// Entries CRUD
	r.Get("/entries", h.ListEntries)
	r.Post("/entries", h.CreateEntry)
	r.Put("/entries", h.UpsertEntry)
	r.Get("/entries/{id}", h.GetEntry)
	r.Put("/entries/{id}", h.UpdateEntry)
	r.Delete("/entries/{id}", h.DeleteEntry)

	// Search
	r.Get("/search", h.SearchEntries)

	// Password generator
	r.Get("/generate", h.GeneratePassword)

	// Audit log
	r.Get("/audit", h.GetAuditLog)

	// Extension API (browser extension helpers: TOTP codes, URL matching)
	r.Get("/ext/totp/{id}", h.GetTOTP)
	r.Get("/ext/match", h.MatchURL)

	// Backups
	r.Get("/backups", h.ListBackups)
	r.Post("/backups", h.CreateBackup)
	r.Post("/backups/restore", h.RestoreBackup)

	// Agent management (owner-only — handlers reject agent tokens)
	r.Post("/agents", h.HandleCreateAgent)
	r.Get("/agents", h.HandleListAgents)
	r.Get("/agents/{id}", h.HandleGetAgent)
	r.Put("/agents/{id}", h.HandleUpdateAgent)
	r.Delete("/agents/{id}", h.HandleDeleteAgent)

	// Entry scope management (owner-only)
	r.Put("/entries/{id}/scopes", h.HandleUpdateEntryScopes)

	// Vault lock
	r.Get("/vault-lock", h.HandleVaultLockStatus)
	r.Post("/vault-unlock", h.HandleVaultUnlock)

	// WebAuthn
	r.Post("/webauthn/register/begin", h.HandleWebAuthnRegisterBegin)
	r.Post("/webauthn/register/complete", h.HandleWebAuthnRegisterComplete)
	r.Post("/webauthn/auth/begin", h.HandleWebAuthnAuthBegin)
	r.Post("/webauthn/auth/complete", h.HandleWebAuthnAuthComplete)
	r.Get("/webauthn/credentials", h.HandleListWebAuthnCredentials)
	r.Delete("/webauthn/credentials/{id}", h.HandleDeleteWebAuthnCredential)
}
|
||||
|
|
@ -1,113 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/johanj/clavitor/api"
|
||||
"github.com/johanj/clavitor/lib"
|
||||
"github.com/johanj/clavitor/proxy"
|
||||
)
|
||||
|
||||
// webFS embeds the web/ directory (app UI, favicon) into the binary.
//go:embed web
var webFS embed.FS

// Build metadata, set via -ldflags at build time.
var (
	version   = "dev"
	commit    = "unknown"
	buildDate = "unknown"
)
|
||||
|
||||
// main wires up configuration (flags with environment-variable
// fallbacks), optional telemetry, the backup scheduler, the optional
// MITM proxy, and finally the TLS HTTP server.
func main() {
	api.Version = version + " (" + commit + " " + buildDate + ")"

	// Flags default to the matching environment variable when set.
	port := flag.Int("port", envInt("PORT", 443), "Listen port")
	telemetryFreq := flag.Int("telemetry-freq", envInt("TELEMETRY_FREQ", 0), "Telemetry POST interval in seconds (0 = disabled)")
	telemetryHost := flag.String("telemetry-host", envStr("TELEMETRY_HOST", ""), "Telemetry endpoint URL")
	telemetryToken := flag.String("telemetry-token", envStr("TELEMETRY_TOKEN", ""), "Bearer token for telemetry endpoint")

	// Proxy mode flags
	proxyEnabled := flag.Bool("proxy", envBool("PROXY_ENABLED", false), "Enable MITM proxy mode (set HTTP_PROXY=http://127.0.0.1:19840 in agent)")
	proxyAddr := flag.String("proxy-addr", envStr("PROXY_ADDR", "127.0.0.1:19840"), "Proxy listen address")
	proxyLLM := flag.Bool("proxy-llm", envBool("PROXY_LLM", false), "Enable LLM policy evaluation in proxy")
	proxyLLMURL := flag.String("proxy-llm-url", envStr("PROXY_LLM_URL", ""), "LLM API base URL for proxy policy (OpenAI-compatible)")
	proxyLLMKey := flag.String("proxy-llm-key", envStr("PROXY_LLM_KEY", ""), "LLM API key for proxy policy")
	proxyLLMModel := flag.String("proxy-llm-model", envStr("PROXY_LLM_MODEL", ""), "LLM model for proxy policy evaluation")

	flag.Parse()

	cfg, err := lib.LoadConfig()
	if err != nil {
		log.Fatalf("config: %v", err)
	}
	cfg.Port = strconv.Itoa(*port)

	// Start telemetry reporter if configured.
	lib.StartTelemetry(lib.TelemetryConfig{
		FreqSeconds: *telemetryFreq,
		Host:        *telemetryHost,
		Token:       *telemetryToken,
		DataDir:     cfg.DataDir,
		Version:     version,
	})

	// Start automatic backup scheduler (3 weekly + 3 monthly, rotated)
	lib.StartBackupTimer(cfg.DataDir)

	// Start proxy if enabled. The proxy runs in its own goroutine; a
	// listener failure is logged but does not take the main server down.
	if *proxyEnabled {
		px, err := proxy.New(proxy.Config{
			ListenAddr: *proxyAddr,
			DataDir:    cfg.DataDir,
			LLMEnabled: *proxyLLM,
			LLMBaseURL: *proxyLLMURL,
			LLMAPIKey:  *proxyLLMKey,
			LLMModel:   *proxyLLMModel,
		})
		if err != nil {
			log.Fatalf("proxy: %v", err)
		}
		go func() {
			if err := px.ListenAndServe(); err != nil {
				log.Printf("proxy: stopped: %v", err)
			}
		}()
		log.Printf("proxy: CA cert at %s — install in OS trust store", px.CACertPath())
		log.Printf("proxy: set HTTP_PROXY=http://%s HTTPS_PROXY=http://%s in agent environment", *proxyAddr, *proxyAddr)
	}

	router := api.NewRouter(cfg, webFS)

	// Serve TLS; ListenAndServeTLS blocks until the server stops.
	addr := ":" + cfg.Port
	tlsCfg := lib.LoadTLSConfig()
	if err := lib.ListenAndServeTLS(addr, router, tlsCfg); err != nil {
		log.Fatalf("server: %v", err)
	}
}
|
||||
|
||||
func envStr(key, fallback string) string {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
return v
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
|
||||
func envBool(key string, fallback bool) bool {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
return v == "1" || v == "true" || v == "yes"
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
|
||||
func envInt(key string, fallback int) int {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
if n, err := strconv.Atoi(v); err == nil {
|
||||
return n
|
||||
}
|
||||
}
|
||||
return fallback
|
||||
}
|
||||
|
|
@ -1,457 +0,0 @@
|
|||
/* ============================================================
|
||||
clavitor — app stylesheet
|
||||
Design tokens from clavitor.css (website styleguide).
|
||||
App-specific component classes below.
|
||||
============================================================ */
|
||||
|
||||
/* === TOKENS (shared with website) === */
|
||||
:root {
|
||||
--pad: 2rem;
|
||||
--radius: 1rem;
|
||||
--radius-sm: 0.5rem;
|
||||
--gap: 1.25rem;
|
||||
|
||||
--bg: #0d1520;
|
||||
--surface: #142542;
|
||||
--surface-alt: #0e2414;
|
||||
--surface-gold: #2a1f00;
|
||||
|
||||
--border: rgba(255,255,255,0.09);
|
||||
--border-gold: rgba(212,175,55,0.3);
|
||||
|
||||
--text: #f1f5f9;
|
||||
--muted: #94a3b8;
|
||||
--subtle: #64748b;
|
||||
|
||||
--accent: #4ade80;
|
||||
--gold: #D4AF37;
|
||||
--red: #EF4444;
|
||||
|
||||
--font-sans: Inter, system-ui, sans-serif;
|
||||
--font-mono: 'JetBrains Mono', monospace;
|
||||
}
|
||||
|
||||
/* --- Themes --- */
|
||||
|
||||
body.theme-light {
|
||||
--bg: #f8f9fb;
|
||||
--surface: #ffffff;
|
||||
--surface-alt: #f0fdf4;
|
||||
--surface-gold: #fffbeb;
|
||||
--border: rgba(0,0,0,0.1);
|
||||
--border-gold: rgba(180,140,20,0.3);
|
||||
--text: #1e293b;
|
||||
--muted: #64748b;
|
||||
--subtle: #94a3b8;
|
||||
--accent: #16a34a;
|
||||
--gold: #b8860b;
|
||||
--red: #dc2626;
|
||||
}
|
||||
|
||||
body.theme-midnight {
|
||||
--bg: #020617;
|
||||
--surface: #0f172a;
|
||||
--surface-alt: #022c22;
|
||||
--surface-gold: #1c1500;
|
||||
--border: rgba(255,255,255,0.06);
|
||||
--border-gold: rgba(212,175,55,0.25);
|
||||
--text: #e2e8f0;
|
||||
--muted: #64748b;
|
||||
--subtle: #475569;
|
||||
--accent: #22c55e;
|
||||
--gold: #eab308;
|
||||
--red: #f87171;
|
||||
}
|
||||
|
||||
/* Light-theme component overrides.
   Fix: removed a stray unmatched `}` that followed these two rules —
   it is invalid CSS and risks parser error-recovery dropping the next rule. */
body.theme-light .topbar { background: rgba(255,255,255,0.9); }
body.theme-light .vault-lock-banner { background: rgba(239,68,68,0.08); }
|
||||
|
||||
/* === RESET === */
|
||||
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
html, body { height: 100%; }
|
||||
body { background: var(--bg); color: var(--text); font-family: var(--font-sans); font-size: 0.875rem; line-height: 1.6; }
|
||||
a { color: inherit; text-decoration: none; }
|
||||
button { font-family: inherit; font-size: inherit; cursor: pointer; border: none; background: none; color: inherit; }
|
||||
input, select, textarea { font-family: inherit; font-size: inherit; color: var(--text); }
|
||||
|
||||
/* === TYPOGRAPHY === */
|
||||
h1 { font-size: 1.875rem; font-weight: 800; line-height: 1.1; color: var(--text); }
|
||||
h2 { font-size: 1.25rem; font-weight: 700; line-height: 1.2; color: var(--text); }
|
||||
h3 { font-size: 1.125rem; font-weight: 700; line-height: 1.3; color: var(--text); }
|
||||
p { color: var(--muted); line-height: 1.75; }
|
||||
.label { font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.12em; text-transform: uppercase; color: var(--subtle); }
|
||||
|
||||
/* === VAULTNAME (from website) === */
|
||||
.vaultname { font-family: var(--font-mono); font-weight: 700; color: var(--text); }
|
||||
.vaultname .n { color: var(--accent); }
|
||||
|
||||
/* === BUTTONS === */
|
||||
.btn { display: inline-flex; align-items: center; justify-content: center; gap: 0.375rem; font-family: var(--font-sans); font-size: 0.875rem; font-weight: 600; padding: 0.625rem 1.25rem; border-radius: var(--radius-sm); border: 1px solid transparent; cursor: pointer; transition: opacity 0.15s, transform 0.15s; text-align: center; line-height: 1.4; }
|
||||
.btn:hover { opacity: 0.85; }
|
||||
.btn:active { transform: scale(0.97); }
|
||||
.btn-primary { background: var(--accent); color: var(--bg); border-color: var(--accent); }
|
||||
.btn-ghost { background: transparent; color: var(--text); border-color: var(--border); }
|
||||
.btn-gold { background: rgba(212,175,55,0.15); color: var(--gold); border-color: rgba(212,175,55,0.3); }
|
||||
.btn-red { background: rgba(239,68,68,0.15); color: var(--red); border-color: rgba(239,68,68,0.3); }
|
||||
.btn-accent { background: rgba(34,197,94,0.15); color: var(--accent); border-color: rgba(34,197,94,0.3); }
|
||||
.btn-block { display: flex; width: 100%; }
|
||||
.btn-lg { padding: 0.75rem 1.5rem; font-size: 1rem; border-radius: var(--radius); }
|
||||
.btn-row { display: flex; flex-wrap: wrap; gap: 1rem; }
|
||||
|
||||
/* === CARDS === */
|
||||
.card { background: rgba(100,140,200,0.12); border: 1px solid rgba(148,163,184,0.15); border-radius: var(--radius); padding: 1.75rem; }
|
||||
.card.alt { background: rgba(34,197,94,0.15); border-color: rgba(34,197,94,0.3); }
|
||||
.card.gold { background: rgba(212,175,55,0.15); border-color: rgba(212,175,55,0.35); }
|
||||
.card.red { background: rgba(239,68,68,0.15); border-color: rgba(239,68,68,0.3); }
|
||||
.card-hover { transition: transform 0.2s, box-shadow 0.2s; border-color: rgba(255,255,255,0.12); }
|
||||
.card-hover:hover { transform: translateY(-2px); box-shadow: 0 8px 24px rgba(0,0,0,0.3); }
|
||||
|
||||
/* === BADGES === */
|
||||
.badge { display: inline-block; font-family: var(--font-mono); font-size: 0.7rem; font-weight: 600; padding: 0.25rem 0.625rem; border-radius: 9999px; }
|
||||
.badge.accent { background: rgba(34,197,94,0.15); color: var(--accent); border: 1px solid rgba(34,197,94,0.3); }
|
||||
.badge.gold { background: rgba(212,175,55,0.15); color: var(--gold); border: 1px solid rgba(212,175,55,0.3); }
|
||||
.badge.red { background: rgba(239,68,68,0.15); color: var(--red); border: 1px solid rgba(239,68,68,0.3); }
|
||||
.badge.blue { background: rgba(96,165,250,0.15); color: #60a5fa; border: 1px solid rgba(96,165,250,0.3); }
|
||||
.badge.muted { background: rgba(100,116,139,0.15); color: var(--subtle); border: 1px solid rgba(100,116,139,0.3); }
|
||||
|
||||
/* === SPACING (from website) === */
|
||||
.mt-2 { margin-top: 0.5rem; } .mb-2 { margin-bottom: 0.5rem; }
|
||||
.mt-3 { margin-top: 0.75rem; } .mb-3 { margin-bottom: 0.75rem; }
|
||||
.mt-4 { margin-top: 1rem; } .mb-4 { margin-bottom: 1rem; }
|
||||
.mt-6 { margin-top: 1.5rem; } .mb-6 { margin-bottom: 1.5rem; }
|
||||
.mt-8 { margin-top: 2rem; } .mb-8 { margin-bottom: 2rem; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Layout
|
||||
============================================================ */
|
||||
|
||||
.app-shell { display: flex; height: 100%; }
|
||||
|
||||
/* --- Centered column --- */
|
||||
.app-column { max-width: 52rem; margin: 0 auto; width: 100%; }
|
||||
|
||||
/* --- Top bar --- */
|
||||
.vault-lock-banner { background: rgba(239,68,68,0.15); border-bottom: 1px solid rgba(239,68,68,0.4); color: var(--red, #ef4444); padding: 0.6rem 1rem; display: flex; align-items: center; justify-content: space-between; font-weight: 600; font-size: 0.8125rem; }
|
||||
.topbar { position: sticky; top: 0; z-index: 40; background: rgba(10,22,40,0.85); backdrop-filter: blur(12px); -webkit-backdrop-filter: blur(12px); border-bottom: 1px solid var(--border); padding: 0 1rem; }
|
||||
.topbar-inner { display: flex; align-items: center; justify-content: space-between; height: 56px; }
|
||||
.topbar-logo { font-family: var(--font-mono); font-weight: 700; font-size: 1.25rem; color: var(--text); line-height: 1; letter-spacing: -0.02em; }
|
||||
.topbar-logo .n { color: var(--accent); }
|
||||
.topbar-links { display: flex; align-items: center; gap: 1.25rem; font-size: 0.875rem; }
|
||||
.topbar-links a,
|
||||
.topbar-links button { color: var(--muted); transition: color 0.15s; font-size: 0.875rem; }
|
||||
.topbar-links a:hover,
|
||||
.topbar-links button:hover { color: var(--text); }
|
||||
.topbar-links a.topbar-active { color: var(--text); }
|
||||
.topbar-lock { color: var(--gold) !important; }
|
||||
|
||||
/* --- Toolbar (search + actions) --- */
|
||||
.toolbar { background: rgba(10,22,40,0.5); border-bottom: 1px solid var(--border); padding: 0.75rem 1rem; }
|
||||
.toolbar-inner { display: flex; align-items: center; gap: 0.75rem; }
|
||||
.toolbar-search { flex: 0 1 20rem; padding: 0.5rem 0.875rem; background: rgba(255,255,255,0.05); border: 1px solid var(--border); border-radius: var(--radius-sm); color: var(--text); outline: none; transition: border-color 0.15s, box-shadow 0.15s; font-size: 0.875rem; }
|
||||
.toolbar-search:focus { border-color: var(--accent); box-shadow: 0 0 0 2px rgba(74,222,128,0.15); }
|
||||
.toolbar-search::placeholder { color: var(--subtle); }
|
||||
|
||||
/* --- Main area --- */
|
||||
.main-area { display: flex; flex-direction: column; height: 100%; overflow: hidden; }
|
||||
.main-content { flex: 1; overflow: hidden; display: flex; }
|
||||
|
||||
/* --- Split panes --- */
|
||||
.split-list { width: 26rem; min-width: 22rem; max-width: 32rem; overflow-y: auto; border-right: 1px solid var(--border); flex-shrink: 0; }
|
||||
.split-detail { flex: 1; overflow-y: auto; }
|
||||
.split-detail-empty { display: flex; align-items: center; justify-content: center; height: 100%; color: var(--subtle); font-size: 0.875rem; }
|
||||
|
||||
/* Active row in split list */
|
||||
.entry-row.active { background: rgba(74,222,128,0.08); border-right: 2px solid var(--accent); }
|
||||
|
||||
/* ============================================================
|
||||
APP — Entry List
|
||||
============================================================ */
|
||||
|
||||
/* Stats bar (between toolbar and content) */
|
||||
.list-stats { padding: 0.5rem 1rem; background: rgba(10,22,40,0.6); border-bottom: 1px solid var(--border); display: flex; flex-wrap: wrap; gap: 0.5rem; align-items: center; }
|
||||
.list-badge { font-size: 0.75rem; font-weight: 600; font-family: var(--font-mono); color: var(--text); padding: 0.25rem 0.625rem; border-radius: var(--radius-sm); background: rgba(255,255,255,0.08); border: 1px solid rgba(255,255,255,0.06); white-space: nowrap; cursor: pointer; transition: opacity 0.15s; }
|
||||
.list-badge:hover { opacity: 0.8; }
|
||||
.list-badge.active { outline: 1.5px solid currentColor; outline-offset: 1px; }
|
||||
.list-badge.type-credential { color: var(--accent); background: rgba(74,222,128,0.12); border-color: rgba(74,222,128,0.15); }
|
||||
.list-badge.type-card { color: var(--gold); background: rgba(212,175,55,0.12); border-color: rgba(212,175,55,0.15); }
|
||||
.list-badge.type-note { color: var(--muted); background: rgba(148,163,184,0.1); border-color: rgba(148,163,184,0.12); }
|
||||
.list-badge.type-identity { color: #60a5fa; background: rgba(96,165,250,0.12); border-color: rgba(96,165,250,0.15); }
|
||||
.list-badge.type-ssh_key { color: var(--red); background: rgba(239,68,68,0.12); border-color: rgba(239,68,68,0.15); }
|
||||
.list-badge.type-totp { color: #a855f7; background: rgba(168,85,247,0.12); border-color: rgba(168,85,247,0.15); }
|
||||
|
||||
.entry-row { display: flex; align-items: center; gap: 0.875rem; padding: 0.75rem 1rem; border-bottom: 1px solid rgba(255,255,255,0.04); cursor: pointer; transition: background 0.15s, transform 0.15s; }
|
||||
.entry-row:hover { background: rgba(255,255,255,0.05); }
|
||||
.entry-row:active { transform: scale(0.995); }
|
||||
.entry-icon { width: 2.75rem; height: 1.375rem; border-radius: 0.25rem; background: rgba(100,140,200,0.12); display: flex; align-items: center; justify-content: center; font-size: 0.5rem; font-weight: 600; color: var(--muted); flex-shrink: 0; font-family: var(--font-mono); text-transform: uppercase; letter-spacing: 0.05em; }
|
||||
.entry-icon.type-credential { background: rgba(74,222,128,0.1); color: var(--accent); }
|
||||
.entry-icon.type-card { background: rgba(212,175,55,0.1); color: var(--gold); }
|
||||
.entry-icon.type-identity { background: rgba(96,165,250,0.1); color: #60a5fa; }
|
||||
.entry-icon.type-note { background: rgba(148,163,184,0.1); color: var(--muted); }
|
||||
.entry-icon.type-ssh_key { background: rgba(239,68,68,0.1); color: var(--red); }
|
||||
.entry-icon.type-totp { background: rgba(168,85,247,0.1); color: #a855f7; }
|
||||
.entry-icon.type-folder { background: rgba(212,175,55,0.1); color: var(--gold); }
|
||||
.entry-domain { font-weight: 600; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; color: var(--text); }
|
||||
.entry-user { color: var(--subtle); overflow: hidden; text-overflow: ellipsis; white-space: nowrap; font-size: 0.8125rem; }
|
||||
.entry-user::before { content: '·'; margin: 0 0.5rem; color: var(--subtle); }
|
||||
.entry-l2 { flex-shrink: 0; }
|
||||
.entry-empty { text-align: center; color: var(--muted); padding: 5rem 1rem; }
|
||||
.entry-empty-icon { font-size: 2.5rem; margin-bottom: 1rem; opacity: 0.5; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Entry Detail
|
||||
============================================================ */
|
||||
|
||||
.detail-wrap { max-width: 48rem; padding: var(--pad); }
|
||||
.detail-header { display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem; padding-bottom: 1.5rem; border-bottom: 1px solid var(--border); }
|
||||
.detail-back { color: var(--muted); transition: color 0.15s; font-size: 0.875rem; display: inline-flex; align-items: center; gap: 0.375rem; }
|
||||
.detail-back:hover { color: var(--text); }
|
||||
.detail-icon { width: 3.25rem; height: 1.75rem; border-radius: 0.25rem; background: rgba(100,140,200,0.12); display: flex; align-items: center; justify-content: center; font-size: 0.625rem; font-weight: 600; color: var(--muted); flex-shrink: 0; font-family: var(--font-mono); text-transform: uppercase; letter-spacing: 0.05em; }
|
||||
.detail-icon.type-credential { background: rgba(74,222,128,0.1); color: var(--accent); }
|
||||
.detail-icon.type-card { background: rgba(212,175,55,0.1); color: var(--gold); }
|
||||
.detail-icon.type-identity { background: rgba(96,165,250,0.1); color: #60a5fa; }
|
||||
.detail-icon.type-note { background: rgba(148,163,184,0.1); color: var(--muted); }
|
||||
.detail-icon.type-ssh_key { background: rgba(239,68,68,0.1); color: var(--red); }
|
||||
.detail-icon.type-totp { background: rgba(168,85,247,0.1); color: #a855f7; }
|
||||
.detail-icon.type-folder { background: rgba(212,175,55,0.1); color: var(--gold); }
|
||||
.detail-title { font-size: 1.25rem; font-weight: 700; }
|
||||
.detail-type { font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.12em; text-transform: uppercase; color: var(--subtle); }
|
||||
.detail-urls { background: rgba(100,140,200,0.08); border: 1px solid rgba(148,163,184,0.1); border-radius: var(--radius-sm); padding: 0.875rem; }
|
||||
.detail-urls a { color: var(--gold); display: block; font-size: 0.875rem; transition: color 0.15s; }
|
||||
.detail-urls a:hover { text-decoration: underline; opacity: 0.85; }
|
||||
.detail-fields { display: flex; flex-direction: column; gap: 0.5rem; }
|
||||
.detail-actions { display: flex; gap: 0.75rem; }
|
||||
|
||||
/* --- Field box --- */
|
||||
.field-box { background: rgba(100,140,200,0.08); border: 1px solid rgba(148,163,184,0.1); border-radius: var(--radius-sm); padding: 0.875rem; transition: border-color 0.15s; }
|
||||
.field-box:hover { border-color: rgba(148,163,184,0.25); }
|
||||
.field-box.field-password { border-left: 3px solid var(--accent); }
|
||||
.field-box.field-totp { border-left: 3px solid #a855f7; }
|
||||
.field-box.field-l2 { border-left: 3px solid var(--gold); }
|
||||
.field-label { font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.08em; text-transform: uppercase; color: var(--subtle); margin-bottom: 0.375rem; display: flex; align-items: center; gap: 0.5rem; }
|
||||
.field-value { display: flex; align-items: center; gap: 0.5rem; }
|
||||
.field-l2-locked { color: var(--gold); font-style: italic; font-size: 0.875rem; }
|
||||
.field-l2-toggle { cursor: pointer; transition: opacity 0.15s; font-size: 0.625rem; }
|
||||
.field-l2-toggle:hover { opacity: 0.7; }
|
||||
.field-action { color: var(--subtle); font-size: 0.75rem; cursor: pointer; transition: color 0.15s; background: none; border: none; padding: 0.25rem; border-radius: 0.25rem; }
|
||||
.field-action:hover { color: var(--text); background: rgba(255,255,255,0.05); }
|
||||
.password-masked { font-family: var(--font-mono); letter-spacing: 0.1em; color: var(--muted); }
|
||||
.notes-box { background: rgba(100,140,200,0.08); border: 1px solid rgba(148,163,184,0.1); border-radius: var(--radius-sm); padding: 0.875rem; white-space: pre-wrap; font-size: 0.875rem; color: var(--muted); line-height: 1.7; }
|
||||
.notes-label { font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.08em; text-transform: uppercase; color: var(--subtle); margin-bottom: 0.375rem; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Modal
|
||||
============================================================ */
|
||||
|
||||
.modal-overlay { position: fixed; inset: 0; background: rgba(0,0,0,0.6); backdrop-filter: blur(4px); -webkit-backdrop-filter: blur(4px); display: flex; align-items: center; justify-content: center; z-index: 50; }
|
||||
.modal-box { background: var(--surface); border: 1px solid rgba(148,163,184,0.15); border-radius: var(--radius); box-shadow: 0 25px 50px rgba(0,0,0,0.5); max-width: 42rem; width: calc(100% - 2rem); max-height: 90vh; overflow-y: auto; }
|
||||
.modal-body { padding: 1.75rem; }
|
||||
.modal-title { font-size: 1.125rem; font-weight: 700; margin-bottom: 1.25rem; padding-bottom: 1rem; border-bottom: 1px solid var(--border); }
|
||||
.modal-actions { display: flex; justify-content: flex-end; gap: 0.75rem; margin-top: 1.5rem; padding-top: 1.25rem; border-top: 1px solid var(--border); }
|
||||
|
||||
/* ============================================================
|
||||
APP — Forms
|
||||
============================================================ */
|
||||
|
||||
.form-group { margin-bottom: 1rem; }
|
||||
.form-label { display: block; font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.08em; text-transform: uppercase; color: var(--subtle); margin-bottom: 0.375rem; }
|
||||
.form-input,
|
||||
.form-select,
|
||||
.form-textarea { width: 100%; padding: 0.5rem 0.75rem; background: rgba(255,255,255,0.05); border: 1px solid var(--border); border-radius: var(--radius-sm); color: var(--text); outline: none; transition: border-color 0.15s, box-shadow 0.15s; }
|
||||
.form-input:focus,
|
||||
.form-select:focus,
|
||||
.form-textarea:focus { border-color: var(--accent); box-shadow: 0 0 0 2px rgba(74,222,128,0.1); }
|
||||
.form-input-sm { padding: 0.375rem 0.5rem; font-size: 0.8125rem; }
|
||||
.form-row { display: flex; gap: 0.5rem; align-items: flex-start; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Drop Zone
|
||||
============================================================ */
|
||||
|
||||
.drop-zone { border: 2px dashed rgba(148,163,184,0.2); border-radius: var(--radius); padding: 2.5rem; text-align: center; cursor: pointer; transition: border-color 0.2s, background 0.2s; background: rgba(100,140,200,0.04); }
|
||||
.drop-zone:hover { border-color: rgba(148,163,184,0.35); background: rgba(100,140,200,0.08); }
|
||||
.drop-zone.active { border-color: var(--gold); background: rgba(212,175,55,0.05); }
|
||||
.drop-zone-icon { font-size: 2.5rem; margin-bottom: 0.75rem; opacity: 0.7; }
|
||||
.drop-zone-text { color: var(--muted); font-size: 0.875rem; }
|
||||
.drop-zone-hint { color: var(--subtle); font-size: 0.75rem; margin-top: 0.5rem; }
|
||||
|
||||
/* ============================================================
|
||||
APP — QR Scanner & TOTP Import
|
||||
============================================================ */
|
||||
|
||||
.import-divider { display: flex; align-items: center; gap: 1rem; margin: 0.5rem 0; color: var(--subtle); font-size: 0.75rem; text-transform: uppercase; letter-spacing: 0.05em; }
|
||||
.import-divider::before,
|
||||
.import-divider::after { content: ''; flex: 1; height: 1px; background: rgba(148,163,184,0.15); }
|
||||
|
||||
.btn-qr-scan { display: flex; align-items: center; justify-content: center; gap: 0.5rem; width: 100%; padding: 0.875rem; background: rgba(100,140,200,0.08); border: 1px solid rgba(100,140,200,0.2); border-radius: var(--radius); color: var(--text); font-size: 0.875rem; font-weight: 500; cursor: pointer; transition: background 0.2s, border-color 0.2s; }
|
||||
.btn-qr-scan:hover { background: rgba(100,140,200,0.15); border-color: rgba(100,140,200,0.35); }
|
||||
.btn-qr-scan svg { opacity: 0.7; }
|
||||
|
||||
.qr-viewfinder { position: relative; border-radius: var(--radius); overflow: hidden; background: #000; aspect-ratio: 4/3; }
|
||||
.qr-viewfinder video { width: 100%; height: 100%; object-fit: cover; }
|
||||
.qr-overlay { position: absolute; inset: 0; display: flex; align-items: center; justify-content: center; pointer-events: none; }
|
||||
.qr-frame { width: 60%; aspect-ratio: 1; border: 2px solid rgba(212,175,55,0.6); border-radius: 12px; box-shadow: 0 0 0 9999px rgba(0,0,0,0.4); }
|
||||
.qr-status { text-align: center; padding: 0.75rem; font-size: 0.8rem; color: var(--muted); }
|
||||
|
||||
.totp-import-list { display: flex; flex-direction: column; gap: 0.25rem; margin-bottom: 1rem; max-height: 300px; overflow-y: auto; }
|
||||
.totp-import-item { display: flex; align-items: center; gap: 0.75rem; padding: 0.625rem 0.75rem; border-radius: var(--radius); transition: opacity 0.2s; }
|
||||
.totp-import-item.skipped { opacity: 0.35; }
|
||||
.totp-import-info { flex: 1; min-width: 0; }
|
||||
.totp-import-name { font-size: 0.875rem; font-weight: 500; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }
|
||||
.totp-import-sub { font-size: 0.75rem; color: var(--muted); white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }
|
||||
|
||||
.totp-tier-select { display: flex; gap: 2px; flex-shrink: 0; }
|
||||
.tier-btn { padding: 0.2rem 0.55rem; font-size: 0.7rem; font-weight: 600; border: 1px solid rgba(148,163,184,0.15); border-radius: 4px; background: transparent; color: var(--muted); cursor: pointer; transition: all 0.15s; letter-spacing: 0.02em; }
|
||||
.tier-btn:hover { border-color: rgba(148,163,184,0.3); color: var(--text); }
|
||||
.tier-btn.active { background: rgba(100,140,200,0.15); border-color: rgba(100,140,200,0.4); color: var(--text); }
|
||||
.tier-btn.tier-skip.active { background: rgba(148,163,184,0.08); border-color: rgba(148,163,184,0.2); color: var(--subtle); }
|
||||
|
||||
/* ============================================================
|
||||
APP — Live TOTP Code
|
||||
============================================================ */
|
||||
|
||||
.totp-code { font-family: 'JetBrains Mono', monospace; font-size: 1.5rem; font-weight: 700; letter-spacing: 0.15em; color: var(--gold, #d4af37); }
|
||||
.totp-countdown { font-size: 0.75rem; font-weight: 600; color: var(--muted); margin-left: 0.75rem; min-width: 2rem; }
|
||||
.totp-countdown.totp-expiring { color: var(--red, #ef4444); animation: totp-pulse 0.5s ease-in-out infinite alternate; }
|
||||
@keyframes totp-pulse { from { opacity: 1; } to { opacity: 0.4; } }
|
||||
.field-totp .field-value { display: flex; align-items: center; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Import Preview
|
||||
============================================================ */
|
||||
|
||||
.import-summary { display: flex; align-items: center; gap: 0.75rem; margin-bottom: 1rem; flex-wrap: wrap; padding: 0.75rem; background: rgba(100,140,200,0.06); border-radius: var(--radius-sm); border: 1px solid var(--border); }
|
||||
.import-summary label { cursor: pointer; user-select: none; display: inline-flex; align-items: center; gap: 0.375rem; font-size: 0.8125rem; }
|
||||
.import-list { max-height: 18rem; overflow-y: auto; display: flex; flex-direction: column; gap: 0.375rem; }
|
||||
.import-item { display: flex; align-items: center; gap: 0.625rem; padding: 0.625rem 0.75rem; background: rgba(100,140,200,0.08); border: 1px solid rgba(148,163,184,0.08); border-radius: var(--radius-sm); transition: background 0.15s; }
|
||||
.import-item:hover { background: rgba(100,140,200,0.12); }
|
||||
.import-item.faded { opacity: 0.35; }
|
||||
.import-item-title { flex: 1; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; font-weight: 500; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Onboarding / Unlock
|
||||
============================================================ */
|
||||
|
||||
.onboard-wrap { min-height: 100%; display: flex; align-items: center; justify-content: center; padding: 1.5rem; }
|
||||
.onboard-inner { max-width: 28rem; width: 100%; }
|
||||
.onboard-heading { text-align: center; margin-bottom: 2rem; }
|
||||
.onboard-heading h1 { margin-bottom: 0.5rem; }
|
||||
.onboard-card { background: rgba(100,140,200,0.08); border: 1px solid rgba(148,163,184,0.15); border-radius: var(--radius); padding: 1.75rem; margin-bottom: 1.5rem; }
|
||||
.onboard-card h2 { margin-bottom: 0.5rem; }
|
||||
.device-list { display: flex; flex-direction: column; gap: 0.75rem; }
|
||||
.device-option { width: 100%; display: flex; align-items: center; gap: 1rem; padding: 1rem; border-radius: var(--radius-sm); background: rgba(255,255,255,0.04); border: 1px solid var(--border); transition: border-color 0.2s, background 0.2s, transform 0.15s; cursor: pointer; text-align: left; }
|
||||
.device-option:hover { border-color: var(--accent); background: rgba(74,222,128,0.04); transform: translateY(-1px); }
|
||||
.device-option-icon { font-size: 1.5rem; width: 2.5rem; height: 2.5rem; text-align: center; flex-shrink: 0; display: flex; align-items: center; justify-content: center; background: rgba(100,140,200,0.1); border-radius: var(--radius-sm); }
|
||||
.device-option strong { display: block; color: var(--text); }
|
||||
.device-option span { color: var(--muted); font-size: 0.8125rem; }
|
||||
.onboard-footer { text-align: center; font-size: 0.75rem; color: var(--subtle); }
|
||||
|
||||
.unlock-wrap { min-height: 100%; display: flex; align-items: center; justify-content: center; padding: 1.5rem; }
|
||||
.unlock-inner { max-width: 24rem; width: 100%; text-align: center; }
|
||||
.unlock-inner h1 { margin-bottom: 0.5rem; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Toast
|
||||
============================================================ */
|
||||
|
||||
.toast { position: fixed; top: 1rem; right: 1rem; padding: 0.75rem 1.25rem; border-radius: var(--radius-sm); z-index: 60; font-weight: 600; font-size: 0.875rem; animation: slideIn 0.3s ease; backdrop-filter: blur(12px); -webkit-backdrop-filter: blur(12px); box-shadow: 0 8px 24px rgba(0,0,0,0.3); }
|
||||
.toast.success { background: rgba(34,197,94,0.85); color: #fff; border: 1px solid rgba(34,197,94,0.5); }
|
||||
.toast.error { background: rgba(239,68,68,0.85); color: #fff; border: 1px solid rgba(239,68,68,0.5); }
|
||||
@keyframes slideIn { from { transform: translateY(-100%); opacity: 0; } to { transform: translateY(0); opacity: 1; } }
|
||||
|
||||
/* ============================================================
|
||||
APP — Audit Table
|
||||
============================================================ */
|
||||
|
||||
.audit-table { width: 100%; border-collapse: collapse; font-size: 0.8125rem; }
|
||||
.audit-table th { text-align: left; color: var(--subtle); padding: 0.5rem 0.75rem; font-weight: 600; font-family: var(--font-mono); font-size: 0.7rem; letter-spacing: 0.08em; text-transform: uppercase; border-bottom: 1px solid var(--border); }
|
||||
.audit-table td { padding: 0.625rem 0.75rem; color: var(--muted); }
|
||||
.audit-table tr + tr { border-top: 1px solid rgba(255,255,255,0.04); }
|
||||
.audit-table tbody tr:hover { background: rgba(255,255,255,0.03); }
|
||||
.audit-scroll { max-height: 24rem; overflow-y: auto; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Utilities
|
||||
============================================================ */
|
||||
|
||||
.hidden { display: none !important; }
|
||||
.truncate { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
|
||||
.text-gold { color: var(--gold); }
|
||||
.text-red { color: var(--red); }
|
||||
.text-accent { color: var(--accent); }
|
||||
.text-muted { color: var(--muted); }
|
||||
.text-subtle { color: var(--subtle); }
|
||||
.text-center { text-align: center; }
|
||||
.text-right { text-align: right; }
|
||||
.cursor-pointer { cursor: pointer; }
|
||||
.italic { font-style: italic; }
|
||||
.font-mono { font-family: var(--font-mono); }
|
||||
.select-all { user-select: all; }
|
||||
.disabled { opacity: 0.5; cursor: not-allowed; pointer-events: none; }
|
||||
|
||||
/* Alert boxes */
|
||||
.alert { padding: 0.75rem; border-radius: var(--radius-sm); font-size: 0.8125rem; }
|
||||
.alert-error { background: rgba(239,68,68,0.12); border: 1px solid rgba(239,68,68,0.25); color: var(--red); }
|
||||
.alert-warning { background: rgba(245,158,11,0.12); border: 1px solid rgba(245,158,11,0.25); color: #f59e0b; }
|
||||
|
||||
/* ============================================================
|
||||
APP — Config Snippets (token page)
|
||||
============================================================ */
|
||||
|
||||
.config-tabs { display: flex; gap: 0; border-bottom: 1px solid var(--border); margin-bottom: 0; }
|
||||
.config-tab { padding: 0.5rem 1rem; font-family: var(--font-mono); font-size: 0.75rem; font-weight: 600; letter-spacing: 0.04em; color: var(--subtle); background: none; border: none; border-bottom: 2px solid transparent; cursor: pointer; transition: color 0.15s, border-color 0.15s; }
|
||||
.config-tab:hover { color: var(--text); }
|
||||
.config-tab.active { color: var(--gold); border-bottom-color: var(--gold); }
|
||||
.config-block { position: relative; background: rgba(0,0,0,0.35); border: 1px solid var(--border); border-top: none; border-radius: 0 0 var(--radius-sm) var(--radius-sm); padding: 1rem 1.25rem; font-family: var(--font-mono); font-size: 0.8rem; line-height: 1.65; color: var(--muted); overflow-x: auto; white-space: pre; }
|
||||
.config-copy { position: absolute; top: 0.5rem; right: 0.5rem; font-family: var(--font-sans); font-size: 0.75rem; font-weight: 600; padding: 0.25rem 0.625rem; border-radius: 0.25rem; background: rgba(255,255,255,0.08); color: var(--subtle); border: 1px solid var(--border); cursor: pointer; transition: color 0.15s, background 0.15s; }
|
||||
.config-copy:hover { color: var(--text); background: rgba(255,255,255,0.12); }
|
||||
|
||||
/* ============================================================
|
||||
APP — Scrollbar (subtle, matching dark theme)
|
||||
============================================================ */
|
||||
::-webkit-scrollbar { width: 6px; }
|
||||
::-webkit-scrollbar-track { background: transparent; }
|
||||
::-webkit-scrollbar-thumb { background: rgba(148,163,184,0.15); border-radius: 3px; }
|
||||
::-webkit-scrollbar-thumb:hover { background: rgba(148,163,184,0.25); }
|
||||
|
||||
/* ============================================================
|
||||
SITE — Marketing / Landing Pages
|
||||
============================================================ */
|
||||
.site { min-height: 100vh; display: flex; flex-direction: column; }
|
||||
.site-column { max-width: 64rem; margin: 0 auto; width: 100%; padding: 0 1rem; }
|
||||
|
||||
/* Hero */
|
||||
.hero { text-align: center; margin-bottom: 5rem; }
|
||||
.hero-title { font-size: clamp(2.5rem, 6vw, 4rem); font-weight: 800; line-height: 1.1; margin-bottom: 1.5rem; letter-spacing: -0.03em; }
|
||||
.hero-sub { font-size: 1.125rem; color: var(--muted); max-width: 40rem; margin: 0 auto 2rem; line-height: 1.7; }
|
||||
.hero-actions { display: flex; gap: 1rem; justify-content: center; }
|
||||
|
||||
/* Features */
|
||||
.features { margin-bottom: 5rem; }
|
||||
.feature-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(18rem, 1fr)); gap: 1.25rem; }
|
||||
.feature-card { background: var(--surface); border: 1px solid var(--border); border-radius: var(--radius-sm); padding: 1.5rem; }
|
||||
.feature-card h3 { font-size: 1rem; font-weight: 700; margin: 0.5rem 0; }
|
||||
.feature-card p { font-size: 0.875rem; color: var(--muted); line-height: 1.6; margin: 0; }
|
||||
.feature-label { font-family: var(--font-mono); font-size: 0.6875rem; font-weight: 600; text-transform: uppercase; letter-spacing: 0.08em; color: var(--subtle); }
|
||||
.feature-label.accent { color: var(--accent); }
|
||||
|
||||
/* How it works */
|
||||
.how-it-works { margin-bottom: 5rem; }
|
||||
.how-it-works h2,
|
||||
.l2-demo h2 { font-size: 1.5rem; font-weight: 700; margin-bottom: 1.5rem; }
|
||||
.code-examples { display: grid; grid-template-columns: repeat(auto-fit, minmax(18rem, 1fr)); gap: 1.25rem; }
|
||||
.code-block { background: rgba(0,0,0,0.3); border: 1px solid var(--border); border-radius: var(--radius-sm); overflow: hidden; }
|
||||
.code-label { font-family: var(--font-mono); font-size: 0.75rem; font-weight: 600; padding: 0.625rem 1rem; background: rgba(255,255,255,0.03); border-bottom: 1px solid var(--border); color: var(--subtle); text-transform: uppercase; letter-spacing: 0.05em; }
|
||||
.code-block pre { margin: 0; padding: 1rem; overflow-x: auto; }
|
||||
.code-block code { font-family: var(--font-mono); font-size: 0.8125rem; color: var(--muted); line-height: 1.6; }
|
||||
|
||||
/* L2 demo */
|
||||
.l2-demo { margin-bottom: 5rem; }
|
||||
|
||||
/* Footer */
|
||||
.site-footer { margin-top: auto; padding: 2rem 1rem; border-top: 1px solid var(--border); }
|
||||
.site-footer .site-column { display: flex; align-items: center; gap: 1.5rem; }
|
||||
.footer-text { font-size: 0.8125rem; color: var(--subtle); font-family: var(--font-mono); }
|
||||
|
||||
/* Small button variant */
|
||||
.btn-sm { padding: 0.375rem 0.875rem; font-size: 0.8125rem; }
|
||||
|
|
@ -1,199 +0,0 @@
|
|||
/* ============================================================
   clavitor — global stylesheet
   ONE rule per class. No exceptions. No inline styles.
   All layout, spacing, color and type lives here.
   ============================================================ */

/* === TOKENS === */
:root {
  --width: 1280px;
  --pad: 2rem;
  --radius: 1rem;
  --radius-sm: 0.5rem;
  --gap: 1.25rem;

  --bg: #0d1520;
  --surface: #142542;
  --surface-alt: #0e2414;
  --surface-gold: #2a1f00;

  --border: rgba(255,255,255,0.09);
  --border-gold: rgba(212,175,55,0.3);

  --text: #f1f5f9;
  --muted: #94a3b8;
  --subtle: #64748b;

  --accent: #4ade80;
  --gold: #D4AF37;
  --red: #EF4444;

  --font-sans: Inter, sans-serif;
  --font-mono: 'JetBrains Mono', monospace;
}

/* === RESET === */
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
body { background: var(--bg); color: var(--text); font-family: var(--font-sans); line-height: 1.6; }
a { color: inherit; text-decoration: none; }
img, svg { display: block; max-width: 100%; }

/* === LAYOUT === */
.container { max-width: var(--width); margin: 0 auto; padding: 0 var(--pad); }
.section { padding-top: 5rem; padding-bottom: 5rem; }
hr.divider { border: none; border-top: 1px solid var(--border); }

/* === TYPOGRAPHY === */
h1 { font-size: clamp(2rem, 4vw, 3.5rem); font-weight: 800; line-height: 1.1; color: var(--text); }
h2 { font-size: clamp(1.5rem, 3vw, 2.25rem); font-weight: 700; line-height: 1.2; color: var(--text); }
h3 { font-size: 1.2rem; font-weight: 700; line-height: 1.3; color: var(--text); }
p { color: var(--muted); font-size: 1rem; line-height: 1.75; }
p.lead { font-size: 1.125rem; }

/* === LABELS === */
.label { font-family: var(--font-mono); font-size: 0.7rem; font-weight: 500; letter-spacing: 0.12em; text-transform: uppercase; color: var(--subtle); }
.label.accent { color: var(--accent); }
.label.gold { color: var(--gold); }
.label.red { color: var(--red); }

/* === VAULTNAME === */
.vaultname { font-family: var(--font-mono); font-weight: 700; color: var(--text); }
.vaultname .n { color: var(--accent); }

/* === CARDS === */
.card { background: rgba(100,140,200,0.18); border: 1px solid rgba(148,163,184,0.22); border-radius: var(--radius); padding: 1.75rem; }
.card.alt { background: rgba(34,197,94,0.15); border-color: rgba(34,197,94,0.3); }
.card.gold { background: rgba(212,175,55,0.15); border-color: rgba(212,175,55,0.35); }
.card.red { background: rgba(239,68,68,0.15); border-color: rgba(239,68,68,0.3); }
.card-hover { transition: transform 0.2s, box-shadow 0.2s; border-color: rgba(255,255,255,0.12); }
.card-hover:hover { transform: translateY(-2px); box-shadow: 0 8px 24px rgba(0,0,0,0.3); }

/* === GRID === */
.grid-2 { display: grid; grid-template-columns: 1fr 1fr; gap: var(--gap); }
.grid-3 { display: grid; grid-template-columns: 1fr 1fr 1fr; gap: var(--gap); }

/* === BUTTONS === */
/* FIX: hover changes `filter`, so `filter` must be in the transition list —
   previously only `opacity` transitioned (a no-op: opacity is already 1),
   making the brightness change snap instead of fade. */
.btn { display: inline-block; font-family: var(--font-sans); font-size: 0.875rem; font-weight: 600; padding: 0.625rem 1.25rem; border-radius: var(--radius-sm); border: 1px solid transparent; cursor: pointer; transition: opacity 0.15s, filter 0.15s; text-align: center; }
.btn:hover { opacity: 1; filter: brightness(1.15); }
.btn-primary { background: var(--accent); color: var(--bg); border-color: var(--accent); }
.btn-ghost { background: transparent; color: var(--text); border-color: rgba(74,222,128,0.4); }
.btn-accent { background: rgba(34,197,94,0.15); color: var(--accent); border-color: rgba(34,197,94,0.3); }
.btn-gold { background: rgba(212,175,55,0.15); color: var(--gold); border-color: rgba(212,175,55,0.3); }
.btn-red { background: rgba(239,68,68,0.15); color: var(--red); border-color: rgba(239,68,68,0.3); }
.btn-block { display: block; width: 100%; }
.btn-row { display: flex; flex-wrap: wrap; gap: 1rem; }

/* === HERO === */
.hero { padding-top: 100px; padding-bottom: 4rem; text-align: center; }
.hero h1 { margin-bottom: 1rem; }
.hero p.lead { max-width: 600px; margin-left: auto; margin-right: auto; }
.hero-split { padding-top: 100px; padding-bottom: 4rem; display: grid; grid-template-columns: 1fr 1fr; gap: 4rem; align-items: center; }

/* === MAP === */
.map-wrap { border-radius: var(--radius); overflow: hidden; border: 1px solid var(--border); }
.map-wrap svg { display: block; width: 100%; background: var(--bg); }
.map-gap { height: 1rem; }

/* === DC GRID (3 action cards below map) === */
#dc-grid { display: flex; gap: var(--gap); }
#dc-grid .dc-card { flex: 1; min-width: 0; border-radius: var(--radius); padding: 1rem; text-align: center; background: var(--surface); border: 1px solid var(--border); }
#dc-grid .dc-card.gold { background: var(--surface-gold); border-color: var(--border-gold); }
#dc-grid .dc-card.red { background: #1a0505; border-color: rgba(239,68,68,0.3); }
#dc-grid .dc-icon { font-size: 1.5rem; margin-bottom: 0.375rem; }
#dc-grid .dc-name { font-size: 0.875rem; font-weight: 600; color: var(--text); margin-bottom: 0.25rem; }
#dc-grid .dc-sub { font-size: 0.75rem; color: var(--subtle); margin-bottom: 0.625rem; }
#dc-grid .dc-status { display: flex; align-items: center; justify-content: center; gap: 0.375rem; font-size: 0.75rem; color: var(--subtle); margin-bottom: 0.75rem; }
#dc-grid .dc-dot { width: 6px; height: 6px; border-radius: 50%; background: currentColor; flex-shrink: 0; }

/* === SPACING === */
.mt-2 { margin-top: 0.5rem; } .mb-2 { margin-bottom: 0.5rem; }
.mt-3 { margin-top: 0.75rem; } .mb-3 { margin-bottom: 0.75rem; }
.mt-4 { margin-top: 1rem; } .mb-4 { margin-bottom: 1rem; }
.mt-6 { margin-top: 1.5rem; } .mb-6 { margin-bottom: 1.5rem; }
.mt-8 { margin-top: 2rem; } .mb-8 { margin-bottom: 2rem; }
.mt-12 { margin-top: 3rem; } .mb-12 { margin-bottom: 3rem; }

/* === ANIMATIONS === */
@keyframes hostedPulse { 0%,100% { opacity:1; transform:scale(1); } 50% { opacity:0.3; transform:scale(1.8); } }

/* === NAV === */
.nav { position: fixed; top: 0; width: 100%; z-index: 50; background: rgba(10,22,40,0.85); backdrop-filter: blur(12px); border-bottom: 1px solid var(--border); }
.nav-inner { max-width: var(--width); margin: 0 auto; padding: 0 var(--pad); height: 64px; display: flex; align-items: center; justify-content: space-between; }
.nav-logo { font-family: var(--font-mono); font-weight: 700; font-size: 2rem; color: var(--text); line-height: 1; letter-spacing: -0.02em; }
.nav-logo .n { color: var(--accent); }
.nav-links { display: flex; align-items: center; gap: 1.5rem; font-size: 0.875rem; }
.nav-link { color: var(--muted); transition: color 0.15s; }
.nav-link:hover { color: var(--text); }
.nav-link.active { color: var(--gold); font-weight: 600; display: flex; align-items: center; gap: 0.375rem; }
.nav-link.active::before { content:''; display:inline-block; width:6px; height:6px; border-radius:50%; background:var(--gold); animation: hostedPulse 2s ease-in-out infinite; }

/* --- Nav dropdown --- */
.nav-dropdown { position: relative; }
.nav-dropdown > a { cursor: pointer; }
.nav-dropdown-menu { display: none; position: absolute; top: 100%; right: 0; padding-top: 0.75rem; min-width: 10rem; z-index: 100; }
.nav-dropdown-menu-inner { background: var(--surface); border: 1px solid var(--border); border-radius: var(--radius-sm); padding: 0.5rem 0; box-shadow: 0 8px 24px rgba(0,0,0,0.4); }
.nav-dropdown-menu a { display: block; padding: 0.5rem 1rem; color: var(--muted); font-size: 0.8125rem; transition: color 0.15s, background 0.15s; }
.nav-dropdown-menu a:hover { color: var(--text); background: rgba(255,255,255,0.05); }
.nav-dropdown:hover .nav-dropdown-menu { display: block; }

/* === GRADIENT TEXT === */
.gradient-text { background: linear-gradient(135deg, #22C55E 0%, #4ade80 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text; }

/* === CODE BLOCKS === */
.code-block { background: rgba(0,0,0,0.6); border: 1px solid rgba(148,163,184,0.2); border-radius: var(--radius); padding: 1.5rem; font-family: var(--font-mono); font-size: 0.875rem; overflow-x: auto; line-height: 1.7; }
.code-block .prompt { color: var(--accent); }
.code-block .comment { color: var(--subtle); }
.code-label { font-size: 0.75rem; color: var(--subtle); margin-bottom: 0.75rem; font-family: var(--font-sans); }
.code-block pre { margin: 0; color: var(--muted); }
pre, code { font-family: var(--font-mono); }

/* === FEATURE ICON === */
.feature-icon { width: 2.5rem; height: 2.5rem; border-radius: 0.5rem; background: rgba(34,197,94,0.1); display: flex; align-items: center; justify-content: center; margin-bottom: 1.25rem; flex-shrink: 0; }
.feature-icon svg { width: 1.25rem; height: 1.25rem; color: var(--accent); stroke: var(--accent); }
.feature-icon.red { background: rgba(239,68,68,0.1); }
.feature-icon.red svg { color: var(--red); stroke: var(--red); }

/* === CHECK LIST === */
.checklist { list-style: none; }
.checklist li { display: flex; align-items: flex-start; gap: 0.75rem; font-size: 0.875rem; color: var(--muted); margin-bottom: 0.75rem; }
.checklist li::before { content: ''; width: 1rem; height: 1rem; flex-shrink: 0; background: var(--accent); border-radius: 50%; margin-top: 0.125rem; clip-path: polygon(20% 50%, 40% 70%, 80% 25%, 85% 30%, 40% 80%, 15% 55%); }
.checklist.red li::before { background: var(--red); }

/* === FOOTER === */
.footer { border-top: 1px solid var(--border); padding: 3rem 0; }
.footer-inner { display: flex; flex-direction: row; align-items: center; justify-content: space-between; flex-wrap: wrap; gap: 1.5rem; }
.footer-links { display: flex; align-items: center; gap: 1rem; font-size: 0.875rem; color: var(--subtle); }
.footer-links a { color: var(--subtle); transition: color 0.15s; }
.footer-links a:hover { color: var(--muted); }
.footer-copy { text-align: center; font-size: 0.75rem; color: var(--subtle); margin-top: 2rem; }

/* === PROSE (legal pages) === */
.prose h2 { font-size: 1.375rem; font-weight: 700; color: var(--text); margin: 2.5rem 0 1rem; }
.prose h3 { font-size: 1.1rem; font-weight: 600; color: var(--text); margin: 1.75rem 0 0.75rem; }
.prose p { color: var(--muted); line-height: 1.8; margin-bottom: 1rem; }
.prose ul { color: var(--muted); padding-left: 1.5rem; margin-bottom: 1rem; line-height: 1.8; }
.prose a { color: var(--accent); }
.prose a:hover { text-decoration: underline; }

/* === BADGE === */
.badge { display: inline-block; font-family: var(--font-mono); font-size: 0.7rem; font-weight: 600; padding: 0.25rem 0.625rem; border-radius: 9999px; }
.badge.accent { background: rgba(34,197,94,0.15); color: var(--accent); border: 1px solid rgba(34,197,94,0.3); }
.badge.gold { background: rgba(212,175,55,0.15); color: var(--gold); border: 1px solid rgba(212,175,55,0.3); }
.badge.recommended { background: var(--accent); color: var(--bg); }
.badge.red { background: rgba(239,68,68,0.15); color: var(--red); border: 1px solid rgba(239,68,68,0.3); }

/* === PRICING CARDS === */
.price-card { border-radius: var(--radius); border: 1px solid var(--border); padding: 2.5rem; background: rgba(100,140,200,0.08); }
.price-card.featured { border-color: rgba(34,197,94,0.4); background: rgba(34,197,94,0.06); }
.price-amount { font-size: 3rem; font-weight: 800; color: var(--text); line-height: 1; }
.price-period { font-size: 1rem; color: var(--muted); font-weight: 400; }

/* === SCROLL === */
html { scroll-behavior: smooth; }

/* === ANIMATIONS (pulse for map/decorative) === */
@keyframes pulseDot { 0%,100% { transform:scale(1); } 50% { transform:scale(1.15); } }
@keyframes pulseRing { 0% { transform:scale(0.8); opacity:1; } 100% { transform:scale(2.5); opacity:0; } }
.pulse-dot { animation: pulseDot 2s ease-in-out infinite; }
.pulse-ring { animation: pulseRing 2s ease-out infinite; }
.pulse-ring-2 { animation: pulseRing 2s ease-out infinite 0.5s; }
|
||||
|
|
@ -1,219 +0,0 @@
|
|||
/*
|
||||
* clavitor — shared crypto module
|
||||
* Runs in both QuickJS (CLI) and browser (extension).
|
||||
*
|
||||
* In CLI (QuickJS): native_* functions provided by jsbridge.c via BearSSL.
|
||||
* All calls are synchronous.
|
||||
* In browser: Web Crypto API used directly (async).
|
||||
*
|
||||
* This file is the single source of truth for L2/L3 field crypto.
|
||||
*/
|
||||
|
||||
/* Detect environment: true when the Web Crypto API (crypto.subtle) is
   available (browser/extension); false in QuickJS, where the native_*
   BearSSL bindings from jsbridge.c are used instead. */
const IS_BROWSER = typeof globalThis.crypto !== 'undefined'
&& typeof globalThis.crypto.subtle !== 'undefined';
|
||||
|
||||
/* --- base64 helpers --- */
|
||||
|
||||
/**
 * Encode bytes as standard base64.
 * Browser: build the binary string in bounded chunks — the original
 * single String.fromCharCode.apply(null, bytes) throws a RangeError
 * once the array exceeds the engine's argument-count limit (~64k).
 * QuickJS: delegate to the native BearSSL-backed encoder.
 * @param {Uint8Array} bytes
 * @returns {string} base64-encoded string
 */
function uint8_to_base64(bytes) {
    if (IS_BROWSER) {
        var CHUNK = 0x8000; /* 32768 args per apply — safely under engine limits */
        var bin = '';
        for (var i = 0; i < bytes.length; i += CHUNK) {
            bin += String.fromCharCode.apply(null, bytes.subarray(i, i + CHUNK));
        }
        return btoa(bin);
    } else {
        return native_base64_encode(bytes);
    }
}
|
||||
|
||||
/**
 * Decode a base64 string into raw bytes.
 * Browser: atob + per-char code points. QuickJS: native decoder.
 * @param {string} str - base64-encoded input
 * @returns {Uint8Array} decoded bytes
 */
function base64_to_uint8(str) {
    if (!IS_BROWSER) {
        return native_base64_decode(str);
    }
    var bin = atob(str);
    var out = new Uint8Array(bin.length);
    var i = 0;
    while (i < bin.length) {
        out[i] = bin.charCodeAt(i);
        i++;
    }
    return out;
}
|
||||
|
||||
/* --- AES-GCM --- */
|
||||
|
||||
/**
|
||||
* Encrypt plaintext with AES-GCM.
|
||||
* @param {Uint8Array} key - 16 bytes (AES-128) or 32 bytes (AES-256)
|
||||
* @param {Uint8Array} plaintext
|
||||
* @returns {Uint8Array|Promise<Uint8Array>} nonce(12) || ciphertext || tag(16)
|
||||
*/
|
||||
/**
 * Encrypt plaintext with AES-GCM.
 * Output layout: nonce(12) || ciphertext || tag(16).
 * Browser path is async (Promise); QuickJS path is synchronous.
 * @param {Uint8Array} key - 16 bytes (AES-128) or 32 bytes (AES-256)
 * @param {Uint8Array} plaintext
 * @returns {Uint8Array|Promise<Uint8Array>}
 */
function aes_gcm_encrypt(key, plaintext) {
    if (!IS_BROWSER) {
        /* QuickJS: synchronous BearSSL binding */
        return native_aes_gcm_encrypt(key, plaintext);
    }
    var nonce = crypto.getRandomValues(new Uint8Array(12));
    return crypto.subtle
        .importKey('raw', key, { name: 'AES-GCM' }, false, ['encrypt'])
        .then(function(cryptoKey) {
            return crypto.subtle.encrypt({ name: 'AES-GCM', iv: nonce }, cryptoKey, plaintext);
        })
        .then(function(ctBuf) {
            /* Prepend the nonce so decryption can recover it. */
            var out = new Uint8Array(12 + ctBuf.byteLength);
            out.set(nonce, 0);
            out.set(new Uint8Array(ctBuf), 12);
            return out;
        });
}
|
||||
|
||||
/**
|
||||
* Decrypt AES-GCM ciphertext.
|
||||
* @param {Uint8Array} key - 16 or 32 bytes
|
||||
* @param {Uint8Array} data - nonce(12) || ciphertext || tag(16)
|
||||
* @returns {Uint8Array|Promise<Uint8Array>} plaintext
|
||||
*/
|
||||
/**
 * Decrypt an AES-GCM blob of the form nonce(12) || ciphertext || tag(16).
 * Throws (or rejects) on truncated input or authentication failure.
 * @param {Uint8Array} key - 16 or 32 bytes
 * @param {Uint8Array} data - combined nonce + ciphertext + tag
 * @returns {Uint8Array|Promise<Uint8Array>} plaintext
 */
function aes_gcm_decrypt(key, data) {
    /* Minimum size: 12-byte nonce + 16-byte tag (empty plaintext). */
    if (data.length < 28) throw new Error('ciphertext too short');

    /* Split nonce/body; prefer subarray for typed-array compatibility (QuickJS). */
    var nonce, body;
    if (typeof data.subarray === 'function') {
        nonce = new Uint8Array(data.subarray(0, 12));
        body = new Uint8Array(data.subarray(12));
    } else {
        nonce = data.slice(0, 12);
        body = data.slice(12);
    }

    if (!IS_BROWSER) {
        /* Native path consumes the full blob — C splits nonce/ct internally. */
        return native_aes_gcm_decrypt_blob(key, data);
    }
    return crypto.subtle
        .importKey('raw', key, { name: 'AES-GCM' }, false, ['decrypt'])
        .then(function(cryptoKey) {
            return crypto.subtle.decrypt({ name: 'AES-GCM', iv: nonce }, cryptoKey, body);
        })
        .then(function(ptBuf) {
            return new Uint8Array(ptBuf);
        });
}
|
||||
|
||||
/* --- HKDF-SHA256 --- */
|
||||
|
||||
/**
|
||||
* HKDF-SHA256 extract + expand.
|
||||
* @param {Uint8Array} ikm - input key material
|
||||
* @param {Uint8Array|null} salt - optional salt
|
||||
* @param {Uint8Array} info - context info
|
||||
* @param {number} length - output length in bytes
|
||||
* @returns {Uint8Array|Promise<Uint8Array>}
|
||||
*/
|
||||
/**
 * HKDF-SHA256 extract + expand.
 * @param {Uint8Array} ikm - input key material
 * @param {Uint8Array|null} salt - optional salt (empty salt when null)
 * @param {Uint8Array} info - context info
 * @param {number} length - output length in bytes
 * @returns {Uint8Array|Promise<Uint8Array>} derived key material
 */
function hkdf_sha256(ikm, salt, info, length) {
    if (!IS_BROWSER) {
        return native_hkdf_sha256(ikm, salt, info, length);
    }
    var params = {
        name: 'HKDF',
        hash: 'SHA-256',
        salt: salt || new Uint8Array(0),
        info: info
    };
    return crypto.subtle
        .importKey('raw', ikm, 'HKDF', false, ['deriveBits'])
        .then(function(cryptoKey) {
            /* deriveBits takes a bit count, not bytes. */
            return crypto.subtle.deriveBits(params, cryptoKey, length * 8);
        })
        .then(function(bits) {
            return new Uint8Array(bits);
        });
}
|
||||
|
||||
/* --- Field encryption/decryption --- */
|
||||
|
||||
/**
|
||||
* Encrypt a field value.
|
||||
* Key length determines tier: 16 bytes = L2 (AES-128), 32 bytes = L3 (AES-256).
|
||||
* @param {Uint8Array} key - 16 or 32 bytes
|
||||
* @param {string} field_label - field label (for per-field key derivation)
|
||||
* @param {string} plaintext - field value to encrypt
|
||||
* @returns {string|Promise<string>} base64-encoded ciphertext
|
||||
*/
|
||||
/*
|
||||
* Normalize key for AES: 8-byte keys are doubled to 16 bytes.
|
||||
* AES requires 16, 24, or 32 byte keys.
|
||||
* HKDF output length matches the (normalized) key length.
|
||||
*/
|
||||
/**
 * Normalize a key for AES: an 8-byte key is doubled to 16 bytes
 * (AES requires 16, 24, or 32 byte keys); anything else passes through.
 * @param {Uint8Array} key
 * @returns {Uint8Array} AES-usable key
 */
function normalize_key(key) {
    if (key.length !== 8) return key;
    /* Concatenate the 8-byte key with itself -> valid AES-128 key. */
    var out = new Uint8Array(16);
    out.set(key, 0);
    out.set(key, 8);
    return out;
}
|
||||
|
||||
/**
 * Encrypt a field value.
 * A per-field key is derived via HKDF-SHA256 with info
 * 'clavitor-field-' + field_label; key length picks the tier
 * (16 bytes = L2/AES-128, 32 bytes = L3/AES-256; 8-byte keys
 * are normalized to 16 first).
 * @param {Uint8Array} key - 8, 16 or 32 bytes
 * @param {string} field_label - field label (per-field key derivation)
 * @param {string} plaintext - field value to encrypt
 * @returns {string|Promise<string>} base64-encoded ciphertext
 */
function encrypt_field(key, field_label, plaintext) {
    var context = 'clavitor-field-' + field_label;
    var aesKey = normalize_key(key);
    var keyLen = aesKey.length; /* 16 or 32 */

    if (!IS_BROWSER) {
        /* QuickJS: everything is synchronous via native bindings. */
        var ctxBytes = native_encode_utf8(context);
        var fieldKey = native_hkdf_sha256(aesKey, null, ctxBytes, keyLen);
        var ptBytes = native_encode_utf8(plaintext);
        return native_base64_encode(native_aes_gcm_encrypt(fieldKey, ptBytes));
    }

    var enc = new TextEncoder();
    return hkdf_sha256(aesKey, null, enc.encode(context), keyLen)
        .then(function(fieldKey) {
            return aes_gcm_encrypt(fieldKey, enc.encode(plaintext));
        })
        .then(function(ct) {
            return uint8_to_base64(ct);
        });
}
|
||||
|
||||
/**
|
||||
* Decrypt a field value.
|
||||
* Key length determines tier: 16 bytes = L2, 32 bytes = L3.
|
||||
* @param {Uint8Array} key - 16 or 32 bytes
|
||||
* @param {string} field_label - field label
|
||||
* @param {string} ciphertext_b64 - base64-encoded ciphertext
|
||||
* @returns {string|Promise<string>} plaintext
|
||||
*/
|
||||
/**
 * Decrypt a field value (inverse of encrypt_field).
 * Derives the same per-field key via HKDF-SHA256 over
 * 'clavitor-field-' + field_label; key length picks the tier
 * (16 bytes = L2, 32 bytes = L3).
 * @param {Uint8Array} key - 8, 16 or 32 bytes
 * @param {string} field_label - field label
 * @param {string} ciphertext_b64 - base64-encoded ciphertext
 * @returns {string|Promise<string>} plaintext
 */
function decrypt_field(key, field_label, ciphertext_b64) {
    var context = 'clavitor-field-' + field_label;
    var aesKey = normalize_key(key);
    var keyLen = aesKey.length;

    if (!IS_BROWSER) {
        /* QuickJS: synchronous native path. */
        var ctxBytes = native_encode_utf8(context);
        var fieldKey = native_hkdf_sha256(aesKey, null, ctxBytes, keyLen);
        var blob = native_base64_decode(ciphertext_b64);
        return native_decode_utf8(native_aes_gcm_decrypt_blob(fieldKey, blob));
    }

    var enc = new TextEncoder();
    var dec = new TextDecoder();
    return hkdf_sha256(aesKey, null, enc.encode(context), keyLen)
        .then(function(fieldKey) {
            return aes_gcm_decrypt(fieldKey, base64_to_uint8(ciphertext_b64));
        })
        .then(function(pt) {
            return dec.decode(pt);
        });
}
|
||||
|
||||
/* Backward compat aliases */
|
||||
/* Backward-compat aliases: entry_id is accepted for the old call shape
   but is not used — field crypto is keyed by label only. */
function l2_encrypt_field(key, entry_id, label, pt) {
    return encrypt_field(key, label, pt);
}
function l2_decrypt_field(key, entry_id, label, ct) {
    return decrypt_field(key, label, ct);
}
|
||||
|
||||
/* Export for both environments: publish the shared crypto API on the
   global `clavitor` namespace (created here if this is the first
   clavitor module loaded). Same surface in browser and QuickJS; only
   sync-vs-Promise return types differ. */
if (typeof globalThis.clavitor === 'undefined') globalThis.clavitor = {};
globalThis.clavitor.crypto = {
aes_gcm_encrypt: aes_gcm_encrypt,
aes_gcm_decrypt: aes_gcm_decrypt,
hkdf_sha256: hkdf_sha256,
encrypt_field: encrypt_field,
decrypt_field: decrypt_field,
l2_encrypt_field: l2_encrypt_field,
l2_decrypt_field: l2_decrypt_field,
uint8_to_base64: uint8_to_base64,
base64_to_uint8: base64_to_uint8
};
|
||||
File diff suppressed because one or more lines are too long
|
|
@ -1,308 +0,0 @@
|
|||
/*
|
||||
* clavitor — crypto test suite
|
||||
* Runs in both QuickJS (CLI) and browser.
|
||||
*
|
||||
* CLI: clavitor-cli test-roundtrip
|
||||
* Web: open browser console, paste: fetch('/app/test_crypto.js').then(r=>r.text()).then(eval)
|
||||
* or load as <script> in a test page
|
||||
*
|
||||
* All tests must produce identical results on both platforms.
|
||||
* If any test fails on one but passes on the other, the shared crypto is broken.
|
||||
*/
|
||||
|
||||
(function() {
|
||||
var R = []; /* results */
|
||||
var FAIL = false;
|
||||
|
||||
function pass(name) { R.push('PASS ' + name); }
|
||||
function fail(name, detail) { R.push('FAIL ' + name + (detail ? ' — ' + detail : '')); FAIL = true; }
|
||||
|
||||
/* Wrap a test that might be sync (QuickJS) or async (browser) */
|
||||
function resolve(val, cb) {
|
||||
if (val && typeof val.then === 'function') {
|
||||
val.then(function(r) { cb(r); }).catch(function(e) { cb(null, e); });
|
||||
} else {
|
||||
cb(val);
|
||||
}
|
||||
}
|
||||
|
||||
/* Safe call: catches sync throws (QuickJS) and async rejections (browser) */
|
||||
function safe(fn, cb) {
|
||||
try {
|
||||
var result = fn();
|
||||
if (result && typeof result.then === 'function') {
|
||||
result.then(function(r) { cb(r); }).catch(function(e) { cb(null, e); });
|
||||
} else {
|
||||
cb(result);
|
||||
}
|
||||
} catch(e) {
|
||||
cb(null, e);
|
||||
}
|
||||
}
|
||||
|
||||
/* --- Key fixtures --- */
|
||||
var K8 = new Uint8Array([11,22,33,44,55,66,77,88]);
|
||||
var K16 = new Uint8Array([11,22,33,44,55,66,77,88,99,110,111,112,113,114,115,116]);
|
||||
var K32 = new Uint8Array([11,22,33,44,55,66,77,88,99,110,111,112,113,114,115,116,
|
||||
201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216]);
|
||||
/* K16 is the first 16 bytes of K32 — by design (truncation model) */
|
||||
/* K8 is the first 8 bytes of K16 — by design */
|
||||
|
||||
var WRONG_K16 = new Uint8Array([255,22,33,44,55,66,77,88,99,110,111,112,113,114,115,116]);
|
||||
|
||||
var tests = [];
|
||||
|
||||
/* --- Test 1: L1 (8-byte) roundtrip --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L1 (8B) encrypt/decrypt roundtrip';
|
||||
resolve(clavitor.crypto.encrypt_field(K8, 'username', 'johanj'), function(ct, err) {
|
||||
if (err) { fail(name, err.message); done(); return; }
|
||||
resolve(clavitor.crypto.decrypt_field(K8, 'username', ct), function(pt, err2) {
|
||||
if (err2) fail(name, err2.message);
|
||||
else if (pt === 'johanj') pass(name);
|
||||
else fail(name, 'got "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 2: L2 (16-byte) roundtrip --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L2 (16B) encrypt/decrypt roundtrip';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'password', 's3cret!P@ss'), function(ct, err) {
|
||||
if (err) { fail(name, err.message); done(); return; }
|
||||
resolve(clavitor.crypto.decrypt_field(K16, 'password', ct), function(pt, err2) {
|
||||
if (err2) fail(name, err2.message);
|
||||
else if (pt === 's3cret!P@ss') pass(name);
|
||||
else fail(name, 'got "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 3: L3 (32-byte) roundtrip --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L3 (32B) encrypt/decrypt roundtrip';
|
||||
resolve(clavitor.crypto.encrypt_field(K32, 'passport', 'NL12345678'), function(ct, err) {
|
||||
if (err) { fail(name, err.message); done(); return; }
|
||||
resolve(clavitor.crypto.decrypt_field(K32, 'passport', ct), function(pt, err2) {
|
||||
if (err2) fail(name, err2.message);
|
||||
else if (pt === 'NL12345678') pass(name);
|
||||
else fail(name, 'got "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 4: L2 key cannot decrypt L3 ciphertext --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L2 key rejects L3 ciphertext';
|
||||
resolve(clavitor.crypto.encrypt_field(K32, 'passport', 'NL12345678'), function(ct, err) {
|
||||
if (err) { fail(name, 'encrypt failed: ' + err.message); done(); return; }
|
||||
safe(function() { return clavitor.crypto.decrypt_field(K16, 'passport', ct); }, function(pt, err2) {
|
||||
if (err2) pass(name);
|
||||
else fail(name, 'L2 key decrypted L3 data to "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 5: L3 key cannot decrypt L2 ciphertext --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L3 key rejects L2 ciphertext';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'password', 'secret'), function(ct, err) {
|
||||
if (err) { fail(name, 'encrypt failed: ' + err.message); done(); return; }
|
||||
safe(function() { return clavitor.crypto.decrypt_field(K32, 'password', ct); }, function(pt, err2) {
|
||||
if (err2) pass(name);
|
||||
else fail(name, 'L3 key decrypted L2 data to "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 6: Wrong key rejection --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'wrong key rejected';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'secret', 'value'), function(ct, err) {
|
||||
if (err) { fail(name, 'encrypt failed'); done(); return; }
|
||||
safe(function() { return clavitor.crypto.decrypt_field(WRONG_K16, 'secret', ct); }, function(pt, err2) {
|
||||
if (err2) pass(name);
|
||||
else fail(name, 'wrong key decrypted to "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 7: Wrong label rejection --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'wrong label rejected';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'labelA', 'value'), function(ct, err) {
|
||||
if (err) { fail(name, 'encrypt failed'); done(); return; }
|
||||
safe(function() { return clavitor.crypto.decrypt_field(K16, 'labelB', ct); }, function(pt, err2) {
|
||||
if (err2) pass(name);
|
||||
else fail(name, 'wrong label decrypted to "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 8: Empty string roundtrip --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'empty string roundtrip';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'empty', ''), function(ct, err) {
|
||||
if (err) { fail(name, err.message); done(); return; }
|
||||
resolve(clavitor.crypto.decrypt_field(K16, 'empty', ct), function(pt, err2) {
|
||||
if (err2) fail(name, err2.message);
|
||||
else if (pt === '') pass(name);
|
||||
else fail(name, 'got "' + pt + '"');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 9: Unicode roundtrip --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'unicode roundtrip';
|
||||
var unicode = 'pässwörd 密码 🔑';
|
||||
resolve(clavitor.crypto.encrypt_field(K16, 'intl', unicode), function(ct, err) {
|
||||
if (err) { fail(name, err.message); done(); return; }
|
||||
resolve(clavitor.crypto.decrypt_field(K16, 'intl', ct), function(pt, err2) {
|
||||
if (err2) fail(name, err2.message);
|
||||
else if (pt === unicode) pass(name);
|
||||
else fail(name, 'mismatch');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 10: L1 key (8B) produces different ciphertext than L2 key (16B) --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'L1 and L2 keys produce different ciphertexts';
|
||||
/* K8 doubled = [11,22,33,44,55,66,77,88,11,22,33,44,55,66,77,88]
|
||||
* K16 = [11,22,33,44,55,66,77,88,99,110,111,112,113,114,115,116]
|
||||
* These are different keys after normalization, so HKDF produces different field keys.
|
||||
* L1 ciphertext must NOT be decryptable with L2 key. */
|
||||
resolve(clavitor.crypto.encrypt_field(K8, 'field', 'test'), function(ct1, err) {
|
||||
if (err) { fail(name, 'L1 encrypt: ' + err.message); done(); return; }
|
||||
safe(function() { return clavitor.crypto.decrypt_field(K16, 'field', ct1); }, function(pt, err2) {
|
||||
if (err2) pass(name);
|
||||
else fail(name, 'L2 key decrypted L1 data');
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 11: TOTP generation (RFC 6238 test vector) --- */
|
||||
tests.push(function(done) {
|
||||
var name = 'TOTP RFC 6238 test vector';
|
||||
var result = clavitor.totp.generate_totp('GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ', 59, 30, 6);
|
||||
resolve(result, function(code, err) {
|
||||
if (err) fail(name, err.message);
|
||||
else if (code === '287082') pass(name);
|
||||
else fail(name, 'got ' + code + ', expected 287082');
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
/* --- Test 12: TOTP second test vector (time=1111111109) --- */
/* RFC 6238 Appendix B vector (SHA-1, 8 digits): T=1111111109 -> 07081804. */
tests.push(function(done) {
    var name = 'TOTP RFC 6238 test vector #2';
    var result = clavitor.totp.generate_totp('GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ', 1111111109, 30, 8);
    resolve(result, function(code, err) {
        if (err) fail(name, err.message);
        else if (code === '07081804') pass(name);
        else fail(name, 'got ' + code + ', expected 07081804');
        done();
    });
});

/* --- Test 13: L3 TOTP seed cannot be read with L2 key --- */
/* Tier isolation: material encrypted with K32 must not decrypt under K16. */
tests.push(function(done) {
    var name = 'L3 TOTP seed inaccessible to agent (L2 key)';
    var seed = 'JBSWY3DPEHPK3PXP'; // a TOTP seed
    resolve(clavitor.crypto.encrypt_field(K32, 'totp', seed), function(ct, err) {
        if (err) { fail(name, 'encrypt failed'); done(); return; }
        // Agent has K16 (L2), tries to decrypt L3 TOTP seed
        safe(function() { return clavitor.crypto.decrypt_field(K16, 'totp', ct); }, function(pt, err2) {
            if (err2) pass(name);
            else fail(name, 'L2 key decrypted L3 TOTP seed to "' + pt + '"');
            done();
        });
    });
});

/* --- Test 14: L2 TOTP seed IS accessible with L2 key --- */
/* Positive control for test 13: same key both ways must round-trip. */
tests.push(function(done) {
    var name = 'L2 TOTP seed accessible to agent (L2 key)';
    var seed = 'JBSWY3DPEHPK3PXP';
    resolve(clavitor.crypto.encrypt_field(K16, 'totp', seed), function(ct, err) {
        if (err) { fail(name, 'encrypt failed'); done(); return; }
        resolve(clavitor.crypto.decrypt_field(K16, 'totp', ct), function(pt, err2) {
            if (err2) fail(name, err2.message);
            else if (pt === seed) pass(name);
            else fail(name, 'got "' + pt + '"');
            done();
        });
    });
});

/* --- Test 15: L3 card number cannot be read with L2 key --- */
tests.push(function(done) {
    var name = 'L3 card number inaccessible to agent (L2 key)';
    resolve(clavitor.crypto.encrypt_field(K32, 'Number', '5452120017212208'), function(ct, err) {
        if (err) { fail(name, 'encrypt failed'); done(); return; }
        safe(function() { return clavitor.crypto.decrypt_field(K16, 'Number', ct); }, function(pt, err2) {
            if (err2) pass(name);
            else fail(name, 'L2 key decrypted L3 card number');
            done();
        });
    });
});

/* --- Test 16: Truncation model — L2 key is prefix of L3 but cannot derive L3 --- */
tests.push(function(done) {
    var name = 'truncation: L2 prefix of L3, still cannot decrypt L3';
    // K16 = K32[0..16] by design. Encrypt with full K32, try with K16.
    resolve(clavitor.crypto.encrypt_field(K32, 'secret', 'classified'), function(ct, err) {
        if (err) { fail(name, 'encrypt failed'); done(); return; }
        safe(function() { return clavitor.crypto.decrypt_field(K16, 'secret', ct); }, function(pt, err2) {
            if (err2) pass(name);
            else fail(name, 'L2 (prefix of L3) decrypted L3 data');
            done();
        });
    });
});
|
||||
|
||||
/* --- Runner --- */

/* Run test idx, then continue to idx+1 via each test's done callback.
 * Serial execution keeps pass/fail output in declaration order. */
function run(idx) {
    if (idx >= tests.length) {
        /* Summary */
        var summary = '\n' + R.join('\n') + '\n\n' +
            (FAIL ? 'FAILED' : 'ALL ' + tests.length + ' TESTS PASSED');

        if (typeof globalThis.document !== 'undefined') {
            /* Browser: log to console */
            console.log(summary);
            R.forEach(function(line) {
                if (line.indexOf('FAIL') === 0) console.error(line);
                else console.log(line);
            });
        }

        /* Return result string (for QuickJS eval or browser display) */
        globalThis._clavitor_test_result = summary;
        return;
    }
    tests[idx](function() { run(idx + 1); });
}
|
||||
|
||||
run(0);

/* For sync environments (QuickJS), result is available immediately */
if (typeof globalThis._clavitor_test_result !== 'undefined') {
    /* Used by CLI eval */
}

})();

/* Return result for jsbridge_eval: last-expression value of the script.
 * Async (browser) runs report RUNNING and log to the console instead. */
globalThis._clavitor_test_result || 'RUNNING (async — check console)';
|
||||
|
|
@ -1,141 +0,0 @@
|
|||
// Theme management (before anything else so it applies immediately, no flash)
var _themes = ['', 'theme-light', 'theme-midnight']; // '' = default theme (no class)
var _currentTheme = localStorage.getItem('clavitor_theme') || '';
if (_currentTheme) document.body.className = _currentTheme;
|
||||
|
||||
// Advance to the next theme in _themes (wrapping around) and persist
// the choice so it survives reloads.
function cycleTheme() {
    var next = (_themes.indexOf(_currentTheme) + 1) % _themes.length;
    _currentTheme = _themes[next];
    document.body.className = _currentTheme;
    localStorage.setItem('clavitor_theme', _currentTheme);
}
|
||||
|
||||
// Stateless auth: L1 Bearer from master key in sessionStorage.
// Returns base64url(master[0..8]) with padding stripped, or null when
// the vault is locked (no master key present).
function getL1Bearer() {
    var stored = sessionStorage.getItem('clavitor_master');
    if (!stored) return null;
    var decoded = atob(stored);
    var prefix = new Uint8Array(8);
    for (var j = 0; j < 8; j++) prefix[j] = decoded.charCodeAt(j);
    var std = btoa(String.fromCharCode.apply(null, prefix));
    return std.replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
}
|
||||
|
||||
// Global API helper. Every request sends L1 as Bearer.
// A 401 means the key is stale: drop it, bounce to the unlock page, and
// throw so callers stop processing. Otherwise the JSON body is returned.
async function api(method, path, body) {
    var headers = { 'Content-Type': 'application/json' };
    var bearer = getL1Bearer();
    if (bearer) headers['Authorization'] = 'Bearer ' + bearer;
    var opts = { method: method, headers: headers };
    if (body) opts.body = JSON.stringify(body);
    var res = await fetch(path, opts);
    if (res.status !== 401) return res.json();
    sessionStorage.removeItem('clavitor_master');
    location.href = '/app/';
    throw new Error('Unauthorized');
}
|
||||
|
||||
// --- Idle timer state (module-level, survives re-init) ---
var _idleTimer = null;      // 60s inactivity timeout handle
var _countdownTimer = null; // 1s interval handle during the lock countdown
var _countdownVal = 0;      // seconds left on the countdown banner

// Restart the inactivity timer on user activity; no-op when the vault
// is already locked (no master key in sessionStorage).
function _resetIdle() {
    if (!sessionStorage.getItem('clavitor_master')) return;
    clearTimeout(_idleTimer);
    _clearCountdown();
    _idleTimer = setTimeout(_startCountdown, 60000);
}

// Show the 15s lock-warning banner and start counting down; on reaching
// zero, drop the master key (lock) and return to the unlock page.
function _startCountdown() {
    _countdownVal = 15;
    var banner = document.createElement('div');
    banner.id = 'lockCountdownBanner';
    banner.style.cssText = 'position:fixed;top:0;left:0;right:0;z-index:9999;background:var(--gold,#D4AF37);color:#000;text-align:center;padding:0.5rem;font-weight:600;font-size:0.875rem;';
    banner.innerHTML = 'Vault locking in <span id="lockCountdownSec">15</span>s — press any key to stay unlocked';
    document.body.appendChild(banner);

    _countdownTimer = setInterval(function() {
        _countdownVal--;
        if (_countdownVal <= 0) {
            _clearCountdown();
            sessionStorage.removeItem('clavitor_master');
            window.location.href = '/app/';
        } else {
            var el = document.getElementById('lockCountdownSec');
            if (el) el.textContent = _countdownVal;
        }
    }, 1000);
}

// Stop the countdown tick and remove the banner (if present).
function _clearCountdown() {
    clearInterval(_countdownTimer);
    _countdownTimer = null;
    var b = document.getElementById('lockCountdownBanner');
    if (b) b.remove();
}
|
||||
|
||||
// Initialize topbar: populate nav, start idle timer, check vault lock.
// Called on page load AND after restoreAppLayout() rebuilds the DOM.
function initTopbar() {
    var path = location.pathname.replace(/\/$/, ''); // strip trailing slash
    var isVault = (path === '/app' || path === '');
    var links = [
        { href: '/app/', label: 'Vault', match: '/app' },
        { href: '/app/agents.html', label: 'Agents', match: '/app/agents' },
        { href: '/app/security.html', label: 'Security', match: '/app/security' }
    ];

    // Build nav anchors, marking the current page with topbar-active.
    var nav = '';
    links.forEach(function(l) {
        var active = path === l.match || path === l.match + '.html';
        nav += '<a href="' + l.href + '"' + (active ? ' class="topbar-active"' : '') + '>' + l.label + '</a>';
    });

    // Theme toggle + Lock button. Lock falls back to clearing the master
    // key directly when the page doesn't define lockVault().
    nav += '<button onclick="cycleTheme()" class="topbar-lock" style="font-size:0.75rem;opacity:0.6" title="Switch theme">Theme</button>';
    nav += '<button onclick="if(typeof lockVault===\'function\'){lockVault()}else{sessionStorage.removeItem(\'clavitor_master\');location.href=\'/app/\'}" class="topbar-lock">Lock</button>';

    // On the vault page the logo opens About (when defined); elsewhere it
    // links back home.
    var logo = isVault
        ? '<span class="topbar-logo vaultname" onclick="if(typeof showAbout===\'function\')showAbout()" style="cursor:pointer">clav<span class="n">itor</span></span>'
        : '<a href="/app/" class="topbar-logo vaultname">clav<span class="n">itor</span></a>';

    var el = document.getElementById('topbar');
    if (el) {
        el.innerHTML =
            '<header class="topbar">' +
            '<div class="topbar-inner" style="padding:0 1rem">' +
            logo +
            '<div class="topbar-links">' + nav + '</div>' +
            '</div>' +
            '</header>';
    }

    // Start idle timer if authenticated
    if (getL1Bearer()) {
        _resetIdle();
        ['keydown', 'mousedown', 'touchstart'].forEach(function(evt) {
            document.removeEventListener(evt, _resetIdle, true); // prevent duplicates
            document.addEventListener(evt, _resetIdle, true);
        });

        // Check vault lock; if locked, insert an unlock banner just below
        // the topbar. Network errors are silently ignored (no banner).
        fetch('/api/vault-lock', {
            headers: { 'Authorization': 'Bearer ' + getL1Bearer() }
        }).then(function(r) { return r.json(); }).then(function(data) {
            if (data && data.locked) {
                var banner = document.createElement('div');
                banner.id = 'vaultLockBanner';
                banner.className = 'vault-lock-banner';
                banner.innerHTML =
                    '<span>Vault locked: ' + (data.locked_reason || 'Unknown') + '</span>' +
                    '<button onclick="unlockVault()" class="btn btn-sm" style="background:rgba(255,255,255,0.15);color:#fff;border:1px solid rgba(255,255,255,0.3)">Unlock</button>';
                var topbar = document.querySelector('.topbar');
                if (topbar) topbar.parentNode.insertBefore(banner, topbar.nextSibling);
            }
        }).catch(function() {});
    }
}
|
||||
|
||||
// Run on initial page load (re-invoked after restoreAppLayout() rebuilds the DOM)
initTopbar();
|
||||
|
|
@ -1,115 +0,0 @@
|
|||
/*
|
||||
* clavitor — TOTP generation (RFC 6238)
|
||||
* Runs in both QuickJS (CLI) and browser (extension).
|
||||
*
|
||||
* In CLI (QuickJS): native_hmac_sha1 provided by jsbridge.c via BearSSL.
|
||||
* All calls are synchronous.
|
||||
* In browser: Web Crypto API (async).
|
||||
*/
|
||||
|
||||
/* IS_BROWSER defined in crypto.js, reuse if available; otherwise probe
 * for Web Crypto (crypto.subtle) to distinguish browser from QuickJS. */
var IS_BROWSER_TOTP = (typeof IS_BROWSER !== 'undefined') ? IS_BROWSER
    : (typeof globalThis.crypto !== 'undefined' && typeof globalThis.crypto.subtle !== 'undefined');
|
||||
|
||||
/* --- Base32 decode (RFC 4648) --- */

/* Decode a base32 string into bytes. Whitespace and '=' padding are
 * stripped first; any remaining character outside the RFC 4648 alphabet
 * is silently skipped (lenient, matching common TOTP secret formats). */
function base32_decode(input) {
    var ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567';
    var cleaned = input.replace(/[\s=]/g, '').toUpperCase();
    var out = new Uint8Array(Math.floor(cleaned.length * 5 / 8));

    var acc = 0, accBits = 0, n = 0;
    for (var i = 0; i < cleaned.length; i++) {
        var val = ALPHABET.indexOf(cleaned.charAt(i));
        if (val < 0) continue; /* skip non-alphabet characters */
        acc = (acc << 5) | val;
        accBits += 5;
        if (accBits >= 8) {
            accBits -= 8;
            out[n++] = (acc >>> accBits) & 0xFF;
        }
    }

    /* View covering only the bytes actually produced. */
    return new Uint8Array(out.buffer, 0, n);
}
|
||||
|
||||
/* --- HMAC-SHA1 --- */

/* Compute HMAC-SHA1 of data with key.
 * QuickJS: synchronous via native_hmac_sha1 (BearSSL bridge).
 * Browser: asynchronous via Web Crypto, resolving to a Uint8Array. */
function hmac_sha1(key, data) {
    if (!IS_BROWSER_TOTP) {
        return native_hmac_sha1(key, data);
    }
    return crypto.subtle
        .importKey('raw', key, { name: 'HMAC', hash: 'SHA-1' }, false, ['sign'])
        .then(function(k) { return crypto.subtle.sign('HMAC', k, data); })
        .then(function(sig) { return new Uint8Array(sig); });
}
|
||||
|
||||
/* --- TOTP (RFC 6238) --- */

/**
 * Generate a TOTP code.
 * Synchronous in QuickJS (returns the code string); asynchronous in the
 * browser (returns a Promise), mirroring hmac_sha1's dual behavior.
 * @param {string} secret_b32 - base32-encoded TOTP secret
 * @param {number} [time] - Unix timestamp (default: now)
 * @param {number} [period] - Time step in seconds (default: 30)
 * @param {number} [digits] - Number of digits (default: 6)
 * @returns {string|Promise<string>} TOTP code (zero-padded)
 */
function generate_totp(secret_b32, time, period, digits) {
    period = period || 30;
    digits = digits || 6;
    time = time || Math.floor(Date.now() / 1000);

    var key = base32_decode(secret_b32);
    var counter = Math.floor(time / period);

    /* Encode counter as 8-byte big-endian. Division (not shifts) keeps
     * counters above 2^32 correct, since JS bitwise ops are 32-bit. */
    var msg = new Uint8Array(8);
    var tmp = counter;
    for (var i = 7; i >= 0; i--) {
        msg[i] = tmp & 0xFF;
        tmp = Math.floor(tmp / 256);
    }

    /* RFC 4226 dynamic truncation: the low nibble of the last HMAC byte
     * selects a 4-byte window; mask the sign bit; keep the low `digits`
     * decimal digits, left-padded with zeros. */
    function truncate(hash) {
        var offset = hash[hash.length - 1] & 0x0F;
        var code = (
            ((hash[offset] & 0x7F) << 24) |
            ((hash[offset + 1] & 0xFF) << 16) |
            ((hash[offset + 2] & 0xFF) << 8) |
            (hash[offset + 3] & 0xFF)
        ) % Math.pow(10, digits);
        var s = code.toString();
        while (s.length < digits) s = '0' + s;
        return s;
    }

    if (IS_BROWSER_TOTP) {
        return hmac_sha1(key, msg).then(truncate);
    } else {
        return truncate(hmac_sha1(key, msg));
    }
}
|
||||
|
||||
/**
 * Time remaining until the current TOTP code expires.
 * @param {number} [period] - Time step in seconds (default: 30)
 * @returns {number} seconds remaining, in the range 1..period
 */
function totp_remaining(period) {
    var step = period || 30;
    var nowSec = Math.floor(Date.now() / 1000);
    return step - (nowSec % step);
}
|
||||
|
||||
/* Export: attach the TOTP API onto the shared clavitor namespace,
 * creating the namespace object if crypto.js hasn't loaded yet. */
if (typeof globalThis.clavitor === 'undefined') globalThis.clavitor = {};
globalThis.clavitor.totp = {
    generate_totp: generate_totp,
    totp_remaining: totp_remaining,
    base32_decode: base32_decode
};
|
||||
|
|
@ -1,282 +0,0 @@
|
|||
// webauthn.js — WebAuthn PRF for Clavitor key derivation
|
||||
//
|
||||
// Derives the master secret from hardware authenticator (WebAuthn PRF).
|
||||
// Truncation model:
|
||||
// L1 / vault_id = bytes[0..8] (8 bytes)
|
||||
// L2 = bytes[0..16] (16 bytes, AES-128-GCM)
|
||||
// L3 = bytes[0..32] (32 bytes, AES-256-GCM)
|
||||
//
|
||||
// Field encryption/decryption is handled by crypto.js (shared with CLI).
|
||||
|
||||
(function(window) {
|
||||
'use strict';
|
||||
|
||||
// sessionStorage key holding the base64 master secret while unlocked.
var SESSION_KEY = 'clavitor_master';
// Fixed HKDF salt, versioned ("-v2") so a future derivation change can rotate it.
var HKDF_SALT = new TextEncoder().encode('clavitor-master-v2');
||||
|
||||
// Derive the 32-byte master key from raw PRF output via HKDF-SHA256
// (salt = HKDF_SALT, empty info) and store it base64-encoded in
// sessionStorage. Returns true if stored, false if prfOutput is
// null/empty (authenticator without PRF support).
async function storeMasterKey(prfOutput) {
    if (!prfOutput || prfOutput.byteLength === 0) return false;
    var ikm = await crypto.subtle.importKey(
        'raw', new Uint8Array(prfOutput), { name: 'HKDF' }, false, ['deriveBits']
    );
    var params = { name: 'HKDF', hash: 'SHA-256', salt: HKDF_SALT, info: new Uint8Array(0) };
    var masterBits = await crypto.subtle.deriveBits(params, ikm, 256);
    sessionStorage.setItem(SESSION_KEY, arrayBufferToBase64(masterBits));
    return true;
}
|
||||
|
||||
// Check browser support for WebAuthn with a user-verifying platform
// authenticator (the capability probe used before offering passkeys).
function isPRFSupported() {
    var pkc = window.PublicKeyCredential;
    return !!(pkc &&
        typeof pkc.isUserVerifyingPlatformAuthenticatorAvailable === 'function');
}
|
||||
|
||||
// Base64URL-encode an ArrayBuffer / typed array (RFC 4648 §5, no padding).
function b64urlEncode(buf) {
    var view = new Uint8Array(buf);
    var chars = [];
    for (var i = 0; i < view.length; i++) {
        chars.push(String.fromCharCode(view[i]));
    }
    return btoa(chars.join(''))
        .replace(/\+/g, '-')
        .replace(/\//g, '_')
        .replace(/=+$/, '');
}
|
||||
|
||||
// Decode a base64url string (padding optional) into an ArrayBuffer.
function b64urlDecode(str) {
    var std = str.replace(/-/g, '+').replace(/_/g, '/');
    while (std.length % 4) std += '=';
    var raw = atob(std);
    var out = new Uint8Array(raw.length);
    for (var i = 0; i < raw.length; i++) {
        out[i] = raw.charCodeAt(i);
    }
    return out.buffer;
}
|
||||
|
||||
// Standard (non-URL) base64 encoding of an ArrayBuffer / typed array.
function arrayBufferToBase64(buf) {
    var view = new Uint8Array(buf);
    var pieces = [];
    for (var i = 0; i < view.length; i++) {
        pieces.push(String.fromCharCode(view[i]));
    }
    return btoa(pieces.join(''));
}
|
||||
|
||||
// Decode standard base64 into a Uint8Array.
function base64ToUint8(b64) {
    var raw = atob(b64);
    var out = new Uint8Array(raw.length);
    for (var i = 0; i < out.length; i++) {
        out[i] = raw.charCodeAt(i);
    }
    return out;
}
|
||||
|
||||
// Register a passkey with PRF extension.
// Flow: register/begin (server issues challenge + user identity) ->
// navigator.credentials.create with a fresh random 32-byte PRF salt ->
// register/complete (credential + salt persisted server-side). If the
// authenticator evaluates PRF during creation, the master key is derived
// and stored immediately, so registration also unlocks.
async function registerPasskey(name) {
    var headers = { 'Content-Type': 'application/json' };
    var bearer = (typeof getL1Bearer === 'function') ? getL1Bearer() : null;
    if (bearer) headers['Authorization'] = 'Bearer ' + bearer;

    var res = await fetch('/api/webauthn/register/begin', {
        method: 'POST',
        headers: headers,
        body: '{}'
    });
    var options = await res.json();

    // Server sends challenge/user.id as byte arrays; WebAuthn needs buffers.
    var challenge = new Uint8Array(options.publicKey.challenge);
    var userId = new Uint8Array(options.publicKey.user.id);

    // Per-credential random PRF salt; sent to the server on complete so it
    // can be replayed at unlock time.
    var prfSalt = new Uint8Array(32);
    crypto.getRandomValues(prfSalt);

    var createOptions = {
        publicKey: {
            challenge: challenge,
            rp: options.publicKey.rp,
            user: {
                id: userId,
                name: options.publicKey.user.name,
                displayName: options.publicKey.user.displayName
            },
            pubKeyCredParams: options.publicKey.pubKeyCredParams,
            authenticatorSelection: options.publicKey.authenticatorSelection,
            extensions: {
                prf: {
                    eval: { first: prfSalt }
                }
            }
        }
    };

    var credential = await navigator.credentials.create(createOptions);
    var extResults = credential.getClientExtensionResults();
    var prfEnabled = extResults.prf && extResults.prf.enabled;

    var regRes = await fetch('/api/webauthn/register/complete', {
        method: 'POST',
        headers: headers,
        body: JSON.stringify({
            cred_id: b64urlEncode(credential.rawId),
            // getPublicKey() is not available on every browser; fall back to
            // an empty key and let the server decide how to handle it.
            public_key: Array.from(new Uint8Array(credential.response.getPublicKey ? credential.response.getPublicKey() : new ArrayBuffer(0))),
            prf_salt: Array.from(prfSalt),
            name: name || 'Security Key'
        })
    });

    var result = await regRes.json();
    result.prf_supported = prfEnabled;

    // Registration = unlocked. Derive master key from PRF output if available.
    var prfResults = extResults.prf && extResults.prf.results;
    if (prfResults && prfResults.first) {
        await storeMasterKey(prfResults.first);
    }

    return result;
}
|
||||
|
||||
// Unlock vault using WebAuthn PRF → derive 32-byte master secret.
// Flow: auth/begin (challenge + allowed credentials + stored PRF salt) ->
// navigator.credentials.get with the PRF extension -> derive and store
// the master key -> notify auth/complete. Throws when no passkeys are
// registered or the authenticator returns no PRF result.
async function unlock() {
    var headers = { 'Content-Type': 'application/json' };
    var bearer = (typeof getL1Bearer === 'function') ? getL1Bearer() : null;
    if (bearer) headers['Authorization'] = 'Bearer ' + bearer;

    var res = await fetch('/api/webauthn/auth/begin', {
        method: 'POST',
        headers: headers,
        body: '{}'
    });
    var options = await res.json();

    if (!options.publicKey || !options.publicKey.allowCredentials || options.publicKey.allowCredentials.length === 0) {
        throw new Error('No registered passkeys found');
    }

    var challenge = new Uint8Array(options.publicKey.challenge);
    var allowCreds = options.publicKey.allowCredentials.map(function(c) {
        return { type: c.type, id: b64urlDecode(c.id) };
    });

    // Replay the salt stored at registration so the authenticator produces
    // the same PRF output — and hence the same master key — every unlock.
    var prfExt = {};
    if (options.publicKey.extensions && options.publicKey.extensions.prf && options.publicKey.extensions.prf.eval) {
        var saltArr = options.publicKey.extensions.prf.eval.first;
        prfExt = { prf: { eval: { first: new Uint8Array(saltArr) } } };
    }

    var assertion = await navigator.credentials.get({
        publicKey: {
            challenge: challenge,
            allowCredentials: allowCreds,
            userVerification: 'required',
            extensions: prfExt
        }
    });

    var extResults = assertion.getClientExtensionResults();
    if (!extResults.prf || !extResults.prf.results || !extResults.prf.results.first) {
        throw new Error('PRF extension not supported or no result returned');
    }

    await storeMasterKey(extResults.prf.results.first);

    // Notify server
    await fetch('/api/webauthn/auth/complete', {
        method: 'POST',
        headers: headers,
        body: JSON.stringify({
            cred_id: b64urlEncode(assertion.rawId),
            sign_count: 0
        })
    });

    return true;
}
|
||||
|
||||
// Get L2 key (first 16 bytes of master) as Uint8Array.
// Returns null when the vault is locked (no master key stored).
function getL2Key() {
    var masterB64 = sessionStorage.getItem(SESSION_KEY);
    if (!masterB64) return null;
    var master = base64ToUint8(masterB64);
    return master.slice(0, 16);
}

// Get L3 key (full 32 bytes of master) as Uint8Array, or null when locked.
function getL3Key() {
    var masterB64 = sessionStorage.getItem(SESSION_KEY);
    if (!masterB64) return null;
    return base64ToUint8(masterB64);
}

// Get vault ID (first 8 bytes of master, standard base64-encoded),
// or null when locked.
function getVaultId() {
    var masterB64 = sessionStorage.getItem(SESSION_KEY);
    if (!masterB64) return null;
    var master = base64ToUint8(masterB64);
    // .slice() copies, so .buffer is exactly the 8-byte prefix.
    return arrayBufferToBase64(master.slice(0, 8).buffer);
}
|
||||
|
||||
// Encrypt a field value (uses shared crypto.js).
// tier selects the key: 3 -> L3 (full 32-byte master), anything else ->
// L2 (16-byte prefix); crypto.js infers the AES variant from key length.
// Throws when the vault is locked.
async function encryptField(fieldLabel, plaintext, tier) {
    var key = (tier === 3) ? getL3Key() : getL2Key();
    if (!key) throw new Error('Vault not unlocked');
    return clavitor.crypto.encrypt_field(key, fieldLabel, plaintext);
}

// Decrypt a field value (uses shared crypto.js).
// tier must match the tier the field was encrypted under.
async function decryptField(fieldLabel, ciphertextB64, tier) {
    var key = (tier === 3) ? getL3Key() : getL2Key();
    if (!key) throw new Error('Vault not unlocked');
    return clavitor.crypto.decrypt_field(key, fieldLabel, ciphertextB64);
}
|
||||
|
||||
// Backward compat: older call sites passed an entryId; the field-level
// scheme doesn't use it, so these shims simply drop the argument.
async function encryptL2Field(entryId, fieldLabel, pt) { return encryptField(fieldLabel, pt, 2); }
async function decryptL2Field(entryId, fieldLabel, ct) { return decryptField(fieldLabel, ct, 2); }
async function encryptL3Field(entryId, fieldLabel, pt) { return encryptField(fieldLabel, pt, 3); }
async function decryptL3Field(entryId, fieldLabel, ct) { return decryptField(fieldLabel, ct, 3); }
|
||||
|
||||
// Check if vault is unlocked (any tier — one master secret backs all tiers).
function isUnlocked() {
    return !!sessionStorage.getItem(SESSION_KEY);
}

// Lock vault by discarding the in-memory master secret.
function lock() {
    sessionStorage.removeItem(SESSION_KEY);
}
|
||||
|
||||
// Backward compatibility aliases (pre-tiered-API names).
var unlockL2 = unlock;
var isL2Unlocked = isUnlocked;
var lockL2 = lock;

// Export public API on window; this is the only symbol the IIFE exposes.
window.ClavitorWebAuthn = {
    isPRFSupported: isPRFSupported,
    storeMasterKey: storeMasterKey,
    registerPasskey: registerPasskey,
    unlock: unlock,
    unlockL2: unlockL2,
    getL2Key: getL2Key,
    getL3Key: getL3Key,
    getVaultId: getVaultId,
    encryptField: encryptField,
    decryptField: decryptField,
    encryptL2Field: encryptL2Field,
    decryptL2Field: decryptL2Field,
    encryptL3Field: encryptL3Field,
    decryptL3Field: decryptL3Field,
    isUnlocked: isUnlocked,
    isL2Unlocked: isL2Unlocked,
    lock: lock,
    lockL2: lockL2,
    b64urlEncode: b64urlEncode
};
|
||||
|
||||
})(window);
|
||||
|
|
@ -1,182 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackupInfo describes a single backup file.
type BackupInfo struct {
	Name      string `json:"name"`       // backup filename (basename only)
	Size      int64  `json:"size"`       // file size in bytes
	CreatedAt int64  `json:"created_at"` // unix seconds (taken from file mtime)
}
|
||||
|
||||
// BackupDir returns the backup directory for a given data dir, creating it
// if needed. The MkdirAll error is deliberately discarded: the path is
// returned regardless, and callers fail on the first real file operation.
func BackupDir(dataDir string) string {
	p := filepath.Join(dataDir, "backups")
	_ = os.MkdirAll(p, 0700)
	return p
}
|
||||
|
||||
// RunBackups backs up vaults whose ID falls in this hour's byte range.
// First byte of vault ID (0x00–0xFF) is mapped across the 168 hours of a week,
// so a POP with thousands of vaults spreads the work evenly.
// Retention: delete backups older than 3 months.
//
// NOTE(review): this globs "*.db" and parses a hex prefix, while the API
// layer derives DB filenames as "clavitor-<base64url>" with no extension —
// confirm which naming is live, or this loop matches nothing.
func RunBackups(dataDir string) {
	backupDir := BackupDir(dataDir)
	now := time.Now()

	// Current hour of the week: Sun 0:00 = 0, Sat 23:00 = 167
	weekHour := int(now.Weekday())*24 + now.Hour()

	// This hour's first-byte range: [lo, hi)
	lo := weekHour * 256 / 168
	hi := (weekHour + 1) * 256 / 168

	dbs, _ := filepath.Glob(filepath.Join(dataDir, "*.db"))
	for _, dbPath := range dbs {
		name := strings.TrimSuffix(filepath.Base(dbPath), ".db")
		if len(name) < 2 {
			continue // too short to carry a hex vault-ID prefix
		}
		b, err := hex.DecodeString(name[:2])
		if err != nil {
			continue // first two chars aren't hex; not a vault DB
		}
		slot := int(b[0])
		if slot < lo || slot >= hi {
			continue // not this hour's slice of the keyspace
		}

		// Skip if already backed up in the last 6 days
		if hasRecentBackup(backupDir, name, 6*24*time.Hour) {
			continue
		}

		// Error intentionally dropped: a failed backup retries next pass.
		createBackup(dbPath, backupDir, now)
	}

	pruneBackups(backupDir, 90*24*time.Hour)
}
|
||||
|
||||
// hasRecentBackup reports whether vaultID already has at least one backup
// whose modification time is newer than maxAge.
func hasRecentBackup(backupDir, vaultID string, maxAge time.Duration) bool {
	matches, _ := filepath.Glob(filepath.Join(backupDir, vaultID+"_*.db"))
	for _, path := range matches {
		fi, err := os.Stat(path)
		if err != nil {
			continue // raced with deletion; treat as absent
		}
		if time.Since(fi.ModTime()) < maxAge {
			return true
		}
	}
	return false
}
|
||||
|
||||
// createBackup copies a DB using VACUUM INTO (consistent, compacted snapshot).
// The destination is "<vaultID>_<timestamp>.db" inside backupDir.
func createBackup(dbPath, backupDir string, now time.Time) error {
	name := strings.TrimSuffix(filepath.Base(dbPath), ".db")
	stamp := now.Format("20060102-150405")
	dest := filepath.Join(backupDir, fmt.Sprintf("%s_%s.db", name, stamp))

	src, err := OpenDB(dbPath)
	if err != nil {
		return err
	}
	defer src.Close()

	// NOTE(review): dest is interpolated into the SQL string. It is built
	// from local filenames here, but the quoting breaks if a data-dir path
	// ever contains a single quote — confirm deployment paths are safe.
	_, err = src.Conn.Exec(fmt.Sprintf("VACUUM INTO '%s'", dest))
	return err
}
|
||||
|
||||
// pruneBackups deletes all backup files older than maxAge.
// Stat and remove failures are ignored: pruning is best-effort and will
// be retried on the next pass.
func pruneBackups(backupDir string, maxAge time.Duration) {
	cutoff := time.Now().Add(-maxAge)
	matches, _ := filepath.Glob(filepath.Join(backupDir, "*.db"))
	for _, path := range matches {
		fi, err := os.Stat(path)
		if err != nil {
			continue
		}
		if fi.ModTime().Before(cutoff) {
			os.Remove(path)
		}
	}
}
|
||||
|
||||
// ListBackups returns all backup files sorted newest first.
|
||||
func ListBackups(dataDir string) []BackupInfo {
|
||||
backupDir := BackupDir(dataDir)
|
||||
files, _ := filepath.Glob(filepath.Join(backupDir, "*.db"))
|
||||
|
||||
var backups []BackupInfo
|
||||
for _, f := range files {
|
||||
info, err := os.Stat(f)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
backups = append(backups, BackupInfo{
|
||||
Name: filepath.Base(f),
|
||||
Size: info.Size(),
|
||||
CreatedAt: info.ModTime().Unix(),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(backups, func(i, j int) bool {
|
||||
return backups[i].CreatedAt > backups[j].CreatedAt
|
||||
})
|
||||
return backups
|
||||
}
|
||||
|
||||
// RestoreBackup replaces the active DB with a backup file.
// The current DB is saved as a pre-restore backup first (best effort).
func RestoreBackup(dataDir, backupName string) error {
	backupDir := BackupDir(dataDir)
	backupPath := filepath.Join(backupDir, backupName)

	// Reject path traversal before the name is used for any file access.
	if strings.Contains(backupName, "/") || strings.Contains(backupName, "\\") || strings.Contains(backupName, "..") {
		return fmt.Errorf("invalid backup name")
	}
	if _, err := os.Stat(backupPath); err != nil {
		return fmt.Errorf("backup not found")
	}

	// Backup names are "<vaultID>_<timestamp>.db": the prefix before the
	// first underscore identifies which live DB gets overwritten.
	parts := strings.SplitN(backupName, "_", 2)
	if len(parts) < 2 {
		return fmt.Errorf("invalid backup name format")
	}
	dbPath := filepath.Join(dataDir, parts[0]+".db")

	// Save current DB as pre-restore backup. Failures here are deliberately
	// ignored so a broken live DB can still be restored over.
	if _, err := os.Stat(dbPath); err == nil {
		preRestore := filepath.Join(backupDir, fmt.Sprintf("%s_prerestore_%s.db", parts[0], time.Now().Format("20060102-150405")))
		src, err := OpenDB(dbPath)
		if err == nil {
			src.Conn.Exec(fmt.Sprintf("VACUUM INTO '%s'", preRestore))
			src.Close()
		}
	}

	// NOTE(review): read+write is not atomic and ignores in-flight writers;
	// acceptable only if callers quiesce the vault first — confirm.
	data, err := os.ReadFile(backupPath)
	if err != nil {
		return fmt.Errorf("read backup: %w", err)
	}
	return os.WriteFile(dbPath, data, 0600)
}
|
||||
|
||||
// StartBackupTimer checks for due backups every hour.
// The goroutine runs for the life of the process with no stop mechanism
// (a process-wide singleton; acceptable for a daemon).
func StartBackupTimer(dataDir string) {
	go func() {
		for {
			// Run first, then sleep, so a due backup isn't delayed a full
			// hour after startup.
			RunBackups(dataDir)
			time.Sleep(1 * time.Hour)
		}
	}()
}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Config holds application configuration.
type Config struct {
	Port       string // listen port; LoadConfig defaults this to "443" (tests override, e.g. "0")
	DataDir    string // directory for vault DB files
	SessionTTL int64  // session lifetime in seconds; default 86400 (24 hours)
}
|
||||
|
||||
// LoadConfig loads configuration from environment variables.
|
||||
func LoadConfig() (*Config, error) {
|
||||
dataDir := os.Getenv("DATA_DIR")
|
||||
if dataDir == "" {
|
||||
dataDir = "."
|
||||
}
|
||||
|
||||
return &Config{
|
||||
Port: "443",
|
||||
DataDir: dataDir,
|
||||
SessionTTL: 86400,
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -1,153 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"io"

	"github.com/klauspost/compress/zstd"
	"golang.org/x/crypto/hkdf"
)
|
||||
|
||||
// Sentinel errors returned by Unpack; compare with errors.Is.
var (
	// ErrDecryptionFailed: AES-GCM open failed (wrong key or tampered data).
	ErrDecryptionFailed = errors.New("decryption failed")
	// ErrInvalidCiphertext: ciphertext too short to contain a nonce.
	ErrInvalidCiphertext = errors.New("invalid ciphertext")
)
|
||||
|
||||
// NormalizeKey doubles an 8-byte key to 16 bytes for AES-128.
// Mirrors normalize_key() in crypto/crypto.js.
// Any other length (notably 16 and 32) passes through unchanged.
func NormalizeKey(key []byte) []byte {
	if len(key) != 8 {
		return key
	}
	out := make([]byte, 16)
	copy(out, key)
	copy(out[8:], key)
	return out
}
|
||||
|
||||
// DeriveEntryKey derives a per-entry AES key from the L1 key using HKDF-SHA256.
// L1 (8 bytes) is normalized to 16 bytes, and the derived key matches that
// length (16 bytes → AES-128-GCM). The entry ID is bound into the HKDF info
// string so every entry gets a distinct key from the same L1.
func DeriveEntryKey(l1Key []byte, entryID int64) ([]byte, error) {
	normalized := NormalizeKey(l1Key)
	// Domain separation: "clavitor-entry-" + hex(entryID).
	info := []byte("clavitor-entry-" + IDToHex(entryID))
	reader := hkdf.New(sha256.New, normalized, nil, info)
	key := make([]byte, len(normalized)) // 16 bytes for AES-128
	if _, err := io.ReadFull(reader, key); err != nil {
		return nil, err
	}
	return key, nil
}
|
||||
|
||||
// DeriveHMACKey derives a separate 32-byte HMAC key for blind indexes from
// L1 via HKDF-SHA256, domain-separated from entry keys by the
// "clavitor-hmac-index" info string so index keys and entry keys never collide.
func DeriveHMACKey(l1Key []byte) ([]byte, error) {
	normalized := NormalizeKey(l1Key)
	info := []byte("clavitor-hmac-index")
	reader := hkdf.New(sha256.New, normalized, nil, info)
	key := make([]byte, 32)
	if _, err := io.ReadFull(reader, key); err != nil {
		return nil, err
	}
	return key, nil
}
|
||||
|
||||
// BlindIndex computes an HMAC-SHA256 blind index for searchable encrypted
// fields, truncated to the first 16 bytes for storage efficiency.
func BlindIndex(hmacKey []byte, plaintext string) []byte {
	mac := hmac.New(sha256.New, hmacKey)
	mac.Write([]byte(plaintext))
	digest := mac.Sum(nil)
	return digest[:16]
}
|
||||
|
||||
// Pack compresses with zstd then encrypts with AES-GCM (random nonce).
|
||||
// Key size determines AES variant: 16=AES-128, 32=AES-256.
|
||||
func Pack(key []byte, plaintext string) ([]byte, error) {
|
||||
compressed, err := zstdCompress([]byte(plaintext))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nonce := make([]byte, gcm.NonceSize())
|
||||
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return gcm.Seal(nonce, nonce, compressed, nil), nil
|
||||
}
|
||||
|
||||
// Unpack decrypts AES-GCM then decompresses zstd.
|
||||
func Unpack(key []byte, ciphertext []byte) (string, error) {
|
||||
if len(ciphertext) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
nonceSize := gcm.NonceSize()
|
||||
if len(ciphertext) < nonceSize {
|
||||
return "", ErrInvalidCiphertext
|
||||
}
|
||||
|
||||
nonce, ct := ciphertext[:nonceSize], ciphertext[nonceSize:]
|
||||
compressed, err := gcm.Open(nil, nonce, ct, nil)
|
||||
if err != nil {
|
||||
return "", ErrDecryptionFailed
|
||||
}
|
||||
|
||||
decompressed, err := zstdDecompress(compressed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(decompressed), nil
|
||||
}
|
||||
|
||||
// zstd encoder/decoder (reusable, goroutine-safe via EncodeAll/DecodeAll).
// NOTE(review): both constructor errors are discarded; with these options
// construction is not expected to fail, but a nil encoder/decoder would
// panic on first use — confirm this trade-off is intended.
var (
	zstdEncoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedDefault))
	zstdDecoder, _ = zstd.NewReader(nil)
)
|
||||
|
||||
// zstdCompress compresses data with the shared package-level encoder.
// EncodeAll itself cannot fail; the error return keeps the signature
// symmetric with zstdDecompress.
func zstdCompress(data []byte) ([]byte, error) {
	return zstdEncoder.EncodeAll(data, nil), nil
}
|
||||
|
||||
// zstdDecompress decompresses data with the shared package-level decoder.
// Returns an error on corrupt or truncated zstd input.
func zstdDecompress(data []byte) ([]byte, error) {
	return zstdDecoder.DecodeAll(data, nil)
}
|
||||
|
||||
// GenerateToken generates a random hex token (32 bytes = 64 hex chars).
|
||||
func GenerateToken() string {
|
||||
b := make([]byte, 32)
|
||||
rand.Read(b)
|
||||
const hex = "0123456789abcdef"
|
||||
result := make([]byte, 64)
|
||||
for i, v := range b {
|
||||
result[i*2] = hex[v>>4]
|
||||
result[i*2+1] = hex[v&0x0f]
|
||||
}
|
||||
return string(result)
|
||||
}
|
||||
|
|
@ -1,188 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNormalizeKey_8bytes verifies an 8-byte key is doubled to 16 bytes:
// first half is the original, second half repeats it.
func TestNormalizeKey_8bytes(t *testing.T) {
	key := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	got := NormalizeKey(key)
	if len(got) != 16 {
		t.Fatalf("expected 16 bytes, got %d", len(got))
	}
	// First 8 bytes = original, second 8 bytes = copy
	for i := 0; i < 8; i++ {
		if got[i] != key[i] {
			t.Errorf("byte %d: expected %d, got %d", i, key[i], got[i])
		}
		if got[i+8] != key[i] {
			t.Errorf("byte %d: expected %d (doubled), got %d", i+8, key[i], got[i+8])
		}
	}
}
|
||||
|
||||
// TestNormalizeKey_16bytes_passthrough verifies a 16-byte key is
// returned unchanged, byte for byte.
func TestNormalizeKey_16bytes_passthrough(t *testing.T) {
	key := make([]byte, 16)
	for i := range key {
		key[i] = byte(i)
	}
	got := NormalizeKey(key)
	if len(got) != 16 {
		t.Fatalf("expected 16 bytes, got %d", len(got))
	}
	for i := range key {
		if got[i] != key[i] {
			t.Errorf("byte %d changed: expected %d, got %d", i, key[i], got[i])
		}
	}
}
|
||||
|
||||
// TestNormalizeKey_32bytes_passthrough verifies a 32-byte (AES-256) key
// keeps its length.
func TestNormalizeKey_32bytes_passthrough(t *testing.T) {
	key := make([]byte, 32)
	got := NormalizeKey(key)
	if len(got) != 32 {
		t.Fatalf("expected 32 bytes, got %d", len(got))
	}
}
|
||||
|
||||
// TestDeriveEntryKey_deterministic verifies the same (L1, entryID) pair
// always derives the same key — required for decrypting stored data.
func TestDeriveEntryKey_deterministic(t *testing.T) {
	l1 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	key1, err := DeriveEntryKey(l1, 12345)
	if err != nil {
		t.Fatal(err)
	}
	key2, err := DeriveEntryKey(l1, 12345)
	if err != nil {
		t.Fatal(err)
	}
	if string(key1) != string(key2) {
		t.Error("same inputs must produce same key")
	}
}
|
||||
|
||||
// TestDeriveEntryKey_different_entries_different_keys verifies per-entry
// key separation: distinct entry IDs must not share a key.
func TestDeriveEntryKey_different_entries_different_keys(t *testing.T) {
	l1 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	key1, _ := DeriveEntryKey(l1, 1)
	key2, _ := DeriveEntryKey(l1, 2)
	if string(key1) == string(key2) {
		t.Error("different entry IDs must produce different keys")
	}
}
|
||||
|
||||
// TestDeriveHMACKey_deterministic verifies the blind-index HMAC key is
// stable for a given L1 and is 32 bytes long.
func TestDeriveHMACKey_deterministic(t *testing.T) {
	l1 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	key1, _ := DeriveHMACKey(l1)
	key2, _ := DeriveHMACKey(l1)
	if string(key1) != string(key2) {
		t.Error("same L1 must produce same HMAC key")
	}
	if len(key1) != 32 {
		t.Errorf("HMAC key should be 32 bytes, got %d", len(key1))
	}
}
|
||||
|
||||
// TestBlindIndex_deterministic verifies the blind index is stable for a
// given (key, plaintext) pair and truncated to 16 bytes.
func TestBlindIndex_deterministic(t *testing.T) {
	hmacKey := make([]byte, 32)
	idx1 := BlindIndex(hmacKey, "test")
	idx2 := BlindIndex(hmacKey, "test")
	if string(idx1) != string(idx2) {
		t.Error("same input must produce same blind index")
	}
	if len(idx1) != 16 {
		t.Errorf("blind index should be 16 bytes, got %d", len(idx1))
	}
}
|
||||
|
||||
// TestBlindIndex_different_inputs verifies distinct plaintexts map to
// distinct indexes (no trivial collision).
func TestBlindIndex_different_inputs(t *testing.T) {
	hmacKey := make([]byte, 32)
	idx1 := BlindIndex(hmacKey, "apple")
	idx2 := BlindIndex(hmacKey, "orange")
	if string(idx1) == string(idx2) {
		t.Error("different inputs should produce different blind indexes")
	}
}
|
||||
|
||||
// TestPackUnpack_roundtrip verifies Pack -> Unpack restores the original
// plaintext with an AES-128 key, and that Pack's output is not the
// plaintext itself.
func TestPackUnpack_roundtrip(t *testing.T) {
	key := make([]byte, 16) // AES-128
	for i := range key {
		key[i] = byte(i + 1)
	}

	original := `{"title":"GitHub","fields":[{"label":"password","value":"hunter2"}]}`
	packed, err := Pack(key, original)
	if err != nil {
		t.Fatalf("Pack: %v", err)
	}

	if string(packed) == original {
		t.Fatal("Pack must not return plaintext")
	}

	unpacked, err := Unpack(key, packed)
	if err != nil {
		t.Fatalf("Unpack: %v", err)
	}

	if unpacked != original {
		t.Errorf("roundtrip failed:\n want: %s\n got: %s", original, unpacked)
	}
}
|
||||
|
||||
// TestPackUnpack_wrong_key_fails verifies GCM authentication rejects a
// ciphertext decrypted with a key differing in even one byte.
func TestPackUnpack_wrong_key_fails(t *testing.T) {
	key1 := make([]byte, 16)
	key2 := make([]byte, 16)
	key2[0] = 0xFF

	packed, err := Pack(key1, "secret data")
	if err != nil {
		t.Fatal(err)
	}

	_, err = Unpack(key2, packed)
	if err == nil {
		t.Fatal("Unpack with wrong key should fail")
	}
}
|
||||
|
||||
// TestPackUnpack_empty_string verifies the documented nil-input shortcut:
// Unpack(nil) returns "" with no error.
func TestPackUnpack_empty_string(t *testing.T) {
	key := make([]byte, 16)
	result, err := Unpack(key, nil)
	if err != nil {
		t.Fatal(err)
	}
	if result != "" {
		t.Errorf("Unpack of nil should return empty string, got %q", result)
	}
}
|
||||
|
||||
// TestPackUnpack_AES256 verifies the roundtrip also works with a 32-byte
// key (AES-256 variant selection by key length).
func TestPackUnpack_AES256(t *testing.T) {
	key := make([]byte, 32) // AES-256
	for i := range key {
		key[i] = byte(i)
	}

	original := "AES-256 test payload"
	packed, err := Pack(key, original)
	if err != nil {
		t.Fatal(err)
	}

	unpacked, err := Unpack(key, packed)
	if err != nil {
		t.Fatal(err)
	}
	if unpacked != original {
		t.Errorf("AES-256 roundtrip failed: got %q", unpacked)
	}
}
|
||||
|
||||
// TestGenerateToken_length_and_uniqueness verifies tokens are 64 hex
// chars and that consecutive tokens differ (randomness smoke test).
func TestGenerateToken_length_and_uniqueness(t *testing.T) {
	t1 := GenerateToken()
	t2 := GenerateToken()
	if len(t1) != 64 {
		t.Errorf("token should be 64 hex chars, got %d", len(t1))
	}
	if t1 == t2 {
		t.Error("two generated tokens should not be equal")
	}
}
|
||||
|
|
@ -1,848 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// Sentinel errors returned by the storage layer; callers compare with
// errors.Is.
var (
	ErrNotFound        = errors.New("not found")
	ErrVersionConflict = errors.New("version conflict: entry was modified")
)
|
||||
|
||||
// schema is the idempotent DDL applied on every startup (all statements
// use IF NOT EXISTS). Tables:
//   - entries: vault entries; data holds the AES-GCM-packed payload,
//     title_idx an HMAC blind index; rows are soft-deleted via deleted_at.
//   - sessions: bearer session tokens with expiry.
//   - audit_log: append-only action history.
//   - webauthn_credentials / webauthn_challenges: passkey state.
//   - agents: machine tokens (hash only) plus scope/admin flags.
//   - vault_lock: singleton lock row (id constrained to 1).
const schema = `
CREATE TABLE IF NOT EXISTS entries (
	entry_id INTEGER PRIMARY KEY,
	parent_id INTEGER NOT NULL DEFAULT 0,
	type TEXT NOT NULL,
	title TEXT NOT NULL,
	title_idx BLOB NOT NULL,
	data BLOB NOT NULL,
	data_level INTEGER NOT NULL DEFAULT 1,
	scopes TEXT NOT NULL DEFAULT '0000',
	created_at INTEGER NOT NULL,
	updated_at INTEGER NOT NULL,
	version INTEGER NOT NULL DEFAULT 1,
	deleted_at INTEGER,
	checksum INTEGER
);
CREATE INDEX IF NOT EXISTS idx_entries_parent ON entries(parent_id);
CREATE INDEX IF NOT EXISTS idx_entries_type ON entries(type);
CREATE INDEX IF NOT EXISTS idx_entries_title_idx ON entries(title_idx);
CREATE INDEX IF NOT EXISTS idx_entries_deleted ON entries(deleted_at);

CREATE TABLE IF NOT EXISTS sessions (
	token TEXT PRIMARY KEY,
	created_at INTEGER NOT NULL,
	expires_at INTEGER NOT NULL,
	actor TEXT NOT NULL DEFAULT 'web'
);

CREATE TABLE IF NOT EXISTS audit_log (
	event_id INTEGER PRIMARY KEY,
	entry_id INTEGER,
	title TEXT,
	action TEXT NOT NULL,
	actor TEXT NOT NULL,
	ip_addr TEXT,
	created_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_audit_entry ON audit_log(entry_id);
CREATE INDEX IF NOT EXISTS idx_audit_created ON audit_log(created_at);

CREATE TABLE IF NOT EXISTS webauthn_credentials (
	cred_id INTEGER PRIMARY KEY,
	name TEXT NOT NULL,
	public_key BLOB NOT NULL,
	credential_id BLOB NOT NULL DEFAULT X'',
	prf_salt BLOB NOT NULL,
	sign_count INTEGER NOT NULL DEFAULT 0,
	created_at INTEGER NOT NULL
);

CREATE TABLE IF NOT EXISTS webauthn_challenges (
	challenge BLOB PRIMARY KEY,
	type TEXT NOT NULL,
	created_at INTEGER NOT NULL
);

CREATE TABLE IF NOT EXISTS agents (
	id INTEGER PRIMARY KEY AUTOINCREMENT,
	token_hash TEXT UNIQUE NOT NULL,
	name TEXT NOT NULL,
	scopes TEXT NOT NULL DEFAULT '',
	all_access INTEGER NOT NULL DEFAULT 0,
	admin INTEGER NOT NULL DEFAULT 0,
	created_at INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_agents_token ON agents(token_hash);

CREATE TABLE IF NOT EXISTS vault_lock (
	id INTEGER PRIMARY KEY CHECK (id = 1),
	locked INTEGER DEFAULT 0,
	locked_reason TEXT,
	locked_at INTEGER DEFAULT 0
);
`
|
||||
|
||||
// OpenDB opens the SQLite database.
|
||||
func OpenDB(dbPath string) (*DB, error) {
|
||||
conn, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_foreign_keys=ON&_busy_timeout=5000")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open db: %w", err)
|
||||
}
|
||||
if err := conn.Ping(); err != nil {
|
||||
return nil, fmt.Errorf("ping db: %w", err)
|
||||
}
|
||||
return &DB{Conn: conn, DBPath: dbPath}, nil
|
||||
}
|
||||
|
||||
// MigrateDB runs the schema migrations.
|
||||
func MigrateDB(db *DB) error {
|
||||
if _, err := db.Conn.Exec(schema); err != nil {
|
||||
return err
|
||||
}
|
||||
// Migration: add credential_id column if missing (existing DBs)
|
||||
_, err := db.Conn.Exec(`ALTER TABLE webauthn_credentials ADD COLUMN credential_id BLOB NOT NULL DEFAULT X''`)
|
||||
if err != nil && !strings.Contains(err.Error(), "duplicate column") {
|
||||
// Ignore "duplicate column" — migration already applied
|
||||
}
|
||||
// Seed vault_lock row
|
||||
db.Conn.Exec(`INSERT OR IGNORE INTO vault_lock (id) VALUES (1)`)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the underlying database connection pool. Safe to call
// once the DB is no longer in use; subsequent queries will fail.
func (db *DB) Close() error {
	return db.Conn.Close()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Entry operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// EntryCreate encrypts and inserts a new entry. It mutates e in place:
// assigns EntryID (when zero), CreatedAt/UpdatedAt, Version=1, defaults
// for DataLevel and Scopes, the HMAC TitleIdx, and the packed Data blob.
// The title's blind index is computed over the lowercased title so
// lookups are case-insensitive.
func EntryCreate(db *DB, vaultKey []byte, e *Entry) error {
	if e.EntryID == 0 {
		e.EntryID = HexID(NewID())
	}

	now := time.Now().UnixMilli()
	e.CreatedAt = now
	e.UpdatedAt = now
	e.Version = 1
	if e.DataLevel == 0 {
		e.DataLevel = DataLevelL1
	}
	if e.Scopes == "" {
		e.Scopes = ScopeOwner
	}

	// Derive keys and encrypt
	entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
	if err != nil {
		return err
	}
	hmacKey, err := DeriveHMACKey(vaultKey)
	if err != nil {
		return err
	}

	// Create blind index for title (lowercased for case-insensitive match)
	e.TitleIdx = BlindIndex(hmacKey, strings.ToLower(e.Title))

	// Pack VaultData if present; otherwise e.Data is stored as provided
	if e.VaultData != nil {
		dataJSON, err := json.Marshal(e.VaultData)
		if err != nil {
			return err
		}
		packed, err := Pack(entryKey, string(dataJSON))
		if err != nil {
			return err
		}
		e.Data = packed
	}

	_, err = db.Conn.Exec(
		`INSERT INTO entries (entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version)
		 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
		int64(e.EntryID), int64(e.ParentID), e.Type, e.Title, e.TitleIdx, e.Data, e.DataLevel, e.Scopes, e.CreatedAt, e.UpdatedAt, e.Version,
	)
	return err
}
|
||||
|
||||
// EntryGet retrieves an entry by ID and decrypts its payload when the
// entry is L1-level. Returns ErrNotFound when no row exists. The query
// has no deleted_at filter, so soft-deleted entries are returned with
// DeletedAt set — callers decide how to treat them.
func EntryGet(db *DB, vaultKey []byte, entryID int64) (*Entry, error) {
	var e Entry
	var deletedAt sql.NullInt64
	err := db.Conn.QueryRow(
		`SELECT entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version, deleted_at
		 FROM entries WHERE entry_id = ?`, entryID,
	).Scan(&e.EntryID, &e.ParentID, &e.Type, &e.Title, &e.TitleIdx, &e.Data, &e.DataLevel, &e.Scopes, &e.CreatedAt, &e.UpdatedAt, &e.Version, &deletedAt)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, ErrNotFound
	}
	if err != nil {
		return nil, err
	}
	if deletedAt.Valid {
		v := deletedAt.Int64
		e.DeletedAt = &v
	}

	// Unpack data — only L1-level payloads are decryptable with vaultKey;
	// any decrypt/JSON failure here is treated as a hard error.
	if len(e.Data) > 0 && e.DataLevel == DataLevelL1 {
		entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
		if err != nil {
			return nil, err
		}
		dataText, err := Unpack(entryKey, e.Data)
		if err != nil {
			return nil, err
		}
		var vd VaultData
		if err := json.Unmarshal([]byte(dataText), &vd); err != nil {
			return nil, err
		}
		e.VaultData = &vd
	}

	return &e, nil
}
|
||||
|
||||
// EntryUpdate updates an existing entry with optimistic locking: the
// UPDATE matches on (entry_id, version, not deleted); zero rows affected
// means a concurrent writer bumped the version or the entry is gone, and
// ErrVersionConflict is returned. On success e.Version and e.UpdatedAt
// are advanced in place to mirror the row.
func EntryUpdate(db *DB, vaultKey []byte, e *Entry) error {
	now := time.Now().UnixMilli()

	// Derive keys
	entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
	if err != nil {
		return err
	}
	hmacKey, err := DeriveHMACKey(vaultKey)
	if err != nil {
		return err
	}

	// Update blind index (lowercased, consistent with EntryCreate)
	e.TitleIdx = BlindIndex(hmacKey, strings.ToLower(e.Title))

	// Pack VaultData if present
	if e.VaultData != nil {
		dataJSON, err := json.Marshal(e.VaultData)
		if err != nil {
			return err
		}
		packed, err := Pack(entryKey, string(dataJSON))
		if err != nil {
			return err
		}
		e.Data = packed
	}

	result, err := db.Conn.Exec(
		`UPDATE entries SET parent_id=?, type=?, title=?, title_idx=?, data=?, data_level=?, scopes=?, updated_at=?, version=version+1
		 WHERE entry_id = ? AND version = ? AND deleted_at IS NULL`,
		int64(e.ParentID), e.Type, e.Title, e.TitleIdx, e.Data, e.DataLevel, e.Scopes, now,
		int64(e.EntryID), e.Version,
	)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected == 0 {
		return ErrVersionConflict
	}
	e.Version++
	e.UpdatedAt = now
	return nil
}
|
||||
|
||||
// EntryDelete soft-deletes an entry.
|
||||
func EntryDelete(db *DB, entryID int64) error {
|
||||
now := time.Now().UnixMilli()
|
||||
result, err := db.Conn.Exec(
|
||||
`UPDATE entries SET deleted_at = ?, updated_at = ? WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
now, now, entryID,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if affected == 0 {
|
||||
return ErrNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EntryList returns all non-deleted entries ordered by (type, title),
// optionally filtered by parent. L1 payload decryption is best-effort:
// derivation/decrypt/JSON failures leave VaultData nil for that entry
// rather than failing the whole listing.
func EntryList(db *DB, vaultKey []byte, parentID *int64) ([]Entry, error) {
	var rows *sql.Rows
	var err error

	if parentID != nil {
		rows, err = db.Conn.Query(
			`SELECT entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version
			 FROM entries WHERE deleted_at IS NULL AND parent_id = ? ORDER BY type, title`, *parentID,
		)
	} else {
		rows, err = db.Conn.Query(
			`SELECT entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version
			 FROM entries WHERE deleted_at IS NULL ORDER BY type, title`,
		)
	}
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var entries []Entry
	for rows.Next() {
		var e Entry
		if err := rows.Scan(&e.EntryID, &e.ParentID, &e.Type, &e.Title, &e.TitleIdx, &e.Data, &e.DataLevel, &e.Scopes, &e.CreatedAt, &e.UpdatedAt, &e.Version); err != nil {
			return nil, err
		}
		// Unpack L1 data (best-effort; errors deliberately skipped)
		if len(e.Data) > 0 && e.DataLevel == DataLevelL1 {
			entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
			if err == nil {
				dataText, err := Unpack(entryKey, e.Data)
				if err == nil {
					var vd VaultData
					if json.Unmarshal([]byte(dataText), &vd) == nil {
						e.VaultData = &vd
					}
				}
			}
		}
		entries = append(entries, e)
	}
	return entries, rows.Err()
}
|
||||
|
||||
// EntryListMeta returns entry metadata only — no decryption, no field
// data (TitleIdx and Data are not even selected). Used for list views;
// individual entries are fetched on demand via EntryGet.
func EntryListMeta(db *DB) ([]Entry, error) {
	rows, err := db.Conn.Query(
		`SELECT entry_id, parent_id, type, title, data_level, scopes, created_at, updated_at, version
		 FROM entries WHERE deleted_at IS NULL ORDER BY type, title`,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var entries []Entry
	for rows.Next() {
		var e Entry
		if err := rows.Scan(&e.EntryID, &e.ParentID, &e.Type, &e.Title, &e.DataLevel, &e.Scopes, &e.CreatedAt, &e.UpdatedAt, &e.Version); err != nil {
			return nil, err
		}
		entries = append(entries, e)
	}
	return entries, rows.Err()
}
|
||||
|
||||
// EntrySearch searches entries by exact title match via the HMAC blind
// index; the query is lowercased to mirror how TitleIdx is written.
// Payload decryption of matches is best-effort.
func EntrySearch(db *DB, vaultKey []byte, query string) ([]Entry, error) {
	hmacKey, err := DeriveHMACKey(vaultKey)
	if err != nil {
		return nil, err
	}
	idx := BlindIndex(hmacKey, strings.ToLower(query))

	rows, err := db.Conn.Query(
		`SELECT entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version
		 FROM entries WHERE deleted_at IS NULL AND title_idx = ? ORDER BY title`, idx,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var entries []Entry
	for rows.Next() {
		var e Entry
		if err := rows.Scan(&e.EntryID, &e.ParentID, &e.Type, &e.Title, &e.TitleIdx, &e.Data, &e.DataLevel, &e.Scopes, &e.CreatedAt, &e.UpdatedAt, &e.Version); err != nil {
			return nil, err
		}
		if len(e.Data) > 0 && e.DataLevel == DataLevelL1 {
			// Best-effort decrypt: derivation/decrypt errors are discarded
			// and simply leave VaultData nil (JSON parse of "" fails below).
			entryKey, _ := DeriveEntryKey(vaultKey, int64(e.EntryID))
			dataText, _ := Unpack(entryKey, e.Data)
			var vd VaultData
			if json.Unmarshal([]byte(dataText), &vd) == nil {
				e.VaultData = &vd
			}
		}
		entries = append(entries, e)
	}
	return entries, rows.Err()
}
|
||||
|
||||
// EntrySearchFuzzy searches entries by substring title match using LIKE
// on the plaintext title column (less secure but more practical than the
// blind index, which only supports exact matches). Payload decryption is
// best-effort, as in EntrySearch.
func EntrySearchFuzzy(db *DB, vaultKey []byte, query string) ([]Entry, error) {
	rows, err := db.Conn.Query(
		`SELECT entry_id, parent_id, type, title, title_idx, data, data_level, scopes, created_at, updated_at, version
		 FROM entries WHERE deleted_at IS NULL AND title LIKE ? ORDER BY title`, "%"+query+"%",
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var entries []Entry
	for rows.Next() {
		var e Entry
		if err := rows.Scan(&e.EntryID, &e.ParentID, &e.Type, &e.Title, &e.TitleIdx, &e.Data, &e.DataLevel, &e.Scopes, &e.CreatedAt, &e.UpdatedAt, &e.Version); err != nil {
			return nil, err
		}
		if len(e.Data) > 0 && e.DataLevel == DataLevelL1 {
			// Best-effort decrypt; errors leave VaultData nil.
			entryKey, _ := DeriveEntryKey(vaultKey, int64(e.EntryID))
			dataText, _ := Unpack(entryKey, e.Data)
			var vd VaultData
			if json.Unmarshal([]byte(dataText), &vd) == nil {
				e.VaultData = &vd
			}
		}
		entries = append(entries, e)
	}
	return entries, rows.Err()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Session operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// SessionCreate mints a random token and inserts a new session.
// ttl is in seconds; timestamps are stored in milliseconds (hence
// the *1000 when computing ExpiresAt).
func SessionCreate(db *DB, ttl int64, actor string) (*Session, error) {
	now := time.Now().UnixMilli()
	s := &Session{
		Token:     GenerateToken(),
		CreatedAt: now,
		ExpiresAt: now + (ttl * 1000),
		Actor:     actor,
	}
	_, err := db.Conn.Exec(
		`INSERT INTO sessions (token, created_at, expires_at, actor) VALUES (?, ?, ?, ?)`,
		s.Token, s.CreatedAt, s.ExpiresAt, s.Actor,
	)
	return s, err
}
|
||||
|
||||
// SessionGet retrieves a session by token. Returns (nil, nil) — not an
// error — when the token is unknown or the session has expired, so
// callers must nil-check the session.
// NOTE(review): expired rows are not deleted here; confirm something
// else purges them.
func SessionGet(db *DB, token string) (*Session, error) {
	var s Session
	err := db.Conn.QueryRow(
		`SELECT token, created_at, expires_at, actor FROM sessions WHERE token = ?`, token,
	).Scan(&s.Token, &s.CreatedAt, &s.ExpiresAt, &s.Actor)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// Check expiry (millisecond timestamps)
	if s.ExpiresAt < time.Now().UnixMilli() {
		return nil, nil
	}
	return &s, nil
}
|
||||
|
||||
// SessionDelete deletes a session (logout). Deleting a token that does
// not exist is not an error.
func SessionDelete(db *DB, token string) error {
	_, err := db.Conn.Exec(`DELETE FROM sessions WHERE token = ?`, token)
	return err
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Audit operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// AuditLog records an audit event, assigning EventID and CreatedAt when
// unset (mutates ev in place).
// NOTE(review): a zero ev.EntryID is stored as literal 0 rather than
// NULL even though the column is nullable — confirm that is intended.
func AuditLog(db *DB, ev *AuditEvent) error {
	if ev.EventID == 0 {
		ev.EventID = HexID(NewID())
	}
	if ev.CreatedAt == 0 {
		ev.CreatedAt = time.Now().UnixMilli()
	}
	_, err := db.Conn.Exec(
		`INSERT INTO audit_log (event_id, entry_id, title, action, actor, ip_addr, created_at)
		 VALUES (?, ?, ?, ?, ?, ?, ?)`,
		int64(ev.EventID), int64(ev.EntryID), ev.Title, ev.Action, ev.Actor, ev.IPAddr, ev.CreatedAt,
	)
	return err
}
|
||||
|
||||
// AuditList returns the most recent audit events, newest first.
// A non-positive limit defaults to 100. Nullable columns (entry_id,
// title, ip_addr) are mapped to zero values when NULL.
func AuditList(db *DB, limit int) ([]AuditEvent, error) {
	if limit <= 0 {
		limit = 100
	}
	rows, err := db.Conn.Query(
		`SELECT event_id, entry_id, title, action, actor, ip_addr, created_at
		 FROM audit_log ORDER BY created_at DESC LIMIT ?`, limit,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var events []AuditEvent
	for rows.Next() {
		var ev AuditEvent
		var entryID sql.NullInt64
		var title, ipAddr sql.NullString
		if err := rows.Scan(&ev.EventID, &entryID, &title, &ev.Action, &ev.Actor, &ipAddr, &ev.CreatedAt); err != nil {
			return nil, err
		}
		if entryID.Valid {
			ev.EntryID = HexID(entryID.Int64)
		}
		if title.Valid {
			ev.Title = title.String
		}
		if ipAddr.Valid {
			ev.IPAddr = ipAddr.String
		}
		events = append(events, ev)
	}
	return events, rows.Err()
}
|
||||
|
||||
// EntryCount returns total entry count (for health check).
|
||||
func EntryCount(db *DB) (int, error) {
|
||||
var count int
|
||||
err := db.Conn.QueryRow(`SELECT COUNT(*) FROM entries WHERE deleted_at IS NULL`).Scan(&count)
|
||||
return count, err
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// WebAuthn credential operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// StoreWebAuthnCredential inserts a new WebAuthn credential, stamping
// CreatedAt (seconds, unlike entries' milliseconds) when unset.
func StoreWebAuthnCredential(db *DB, c *WebAuthnCredential) error {
	if c.CreatedAt == 0 {
		c.CreatedAt = time.Now().Unix()
	}
	_, err := db.Conn.Exec(
		`INSERT INTO webauthn_credentials (cred_id, name, public_key, credential_id, prf_salt, sign_count, created_at)
		 VALUES (?, ?, ?, ?, ?, ?, ?)`,
		int64(c.CredID), c.Name, c.PublicKey, c.CredentialID, c.PRFSalt, c.SignCount, c.CreatedAt,
	)
	return err
}
|
||||
|
||||
// GetWebAuthnCredentials returns all registered WebAuthn credentials,
// newest first.
func GetWebAuthnCredentials(db *DB) ([]WebAuthnCredential, error) {
	rows, err := db.Conn.Query(
		`SELECT cred_id, name, public_key, credential_id, prf_salt, sign_count, created_at
		 FROM webauthn_credentials ORDER BY created_at DESC`,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var creds []WebAuthnCredential
	for rows.Next() {
		var c WebAuthnCredential
		if err := rows.Scan(&c.CredID, &c.Name, &c.PublicKey, &c.CredentialID, &c.PRFSalt, &c.SignCount, &c.CreatedAt); err != nil {
			return nil, err
		}
		creds = append(creds, c)
	}
	return creds, rows.Err()
}
|
||||
|
||||
// WebAuthnCredentialCount returns the number of registered WebAuthn
// credentials (used to decide between registration and login flows).
func WebAuthnCredentialCount(db *DB) (int, error) {
	var count int
	err := db.Conn.QueryRow(`SELECT COUNT(*) FROM webauthn_credentials`).Scan(&count)
	return count, err
}
|
||||
|
||||
// GetFirstCredentialPublicKey returns the public key of the oldest
// registered credential. Returns (nil, nil) if no credentials exist yet,
// so callers must nil-check the key.
func GetFirstCredentialPublicKey(db *DB) ([]byte, error) {
	var pubkey []byte
	err := db.Conn.QueryRow(
		`SELECT public_key FROM webauthn_credentials ORDER BY created_at ASC LIMIT 1`,
	).Scan(&pubkey)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil
	}
	return pubkey, err
}
|
||||
|
||||
// GetWebAuthnCredentialByRawID looks up a credential by its raw WebAuthn
// credential ID (the authenticator-assigned byte string). Returns
// ErrNotFound when no row matches.
func GetWebAuthnCredentialByRawID(db *DB, credentialID []byte) (*WebAuthnCredential, error) {
	var c WebAuthnCredential
	err := db.Conn.QueryRow(
		`SELECT cred_id, name, public_key, credential_id, prf_salt, sign_count, created_at
		 FROM webauthn_credentials WHERE credential_id = ?`, credentialID,
	).Scan(&c.CredID, &c.Name, &c.PublicKey, &c.CredentialID, &c.PRFSalt, &c.SignCount, &c.CreatedAt)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, ErrNotFound
	}
	return &c, err
}
|
||||
|
||||
// DeleteWebAuthnCredential removes a WebAuthn credential by its internal
// cred_id. Returns ErrNotFound when no row was deleted.
func DeleteWebAuthnCredential(db *DB, credID int64) error {
	result, err := db.Conn.Exec(`DELETE FROM webauthn_credentials WHERE cred_id = ?`, credID)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected == 0 {
		return ErrNotFound
	}
	return nil
}
|
||||
|
||||
// UpdateWebAuthnSignCount stores the authenticator-reported sign count
// for a credential (the value is set, not incremented, despite the
// original comment's wording). No error if the credential is missing.
func UpdateWebAuthnSignCount(db *DB, credID int64, count int) error {
	_, err := db.Conn.Exec(`UPDATE webauthn_credentials SET sign_count = ? WHERE cred_id = ?`, count, credID)
	return err
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// WebAuthn challenge operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// StoreWebAuthnChallenge stores a challenge for later one-shot
// verification by ConsumeWebAuthnChallenge. Timestamps are in seconds.
func StoreWebAuthnChallenge(db *DB, challenge []byte, challengeType string) error {
	_, err := db.Conn.Exec(
		`INSERT INTO webauthn_challenges (challenge, type, created_at) VALUES (?, ?, ?)`,
		challenge, challengeType, time.Now().Unix(),
	)
	return err
}
|
||||
|
||||
// ConsumeWebAuthnChallenge verifies and removes a challenge atomically
// (the DELETE both checks and consumes, preventing replay). Returns an
// error if the challenge is not found or older than the 5-minute TTL.
func ConsumeWebAuthnChallenge(db *DB, challenge []byte, challengeType string) error {
	fiveMinAgo := time.Now().Unix() - 300
	result, err := db.Conn.Exec(
		`DELETE FROM webauthn_challenges WHERE challenge = ? AND type = ? AND created_at > ?`,
		challenge, challengeType, fiveMinAgo,
	)
	if err != nil {
		return err
	}
	affected, err := result.RowsAffected()
	if err != nil {
		return err
	}
	if affected == 0 {
		return errors.New("challenge not found or expired")
	}
	return nil
}
|
||||
|
||||
// CleanExpiredChallenges removes challenges older than 5 minutes.
// Best-effort housekeeping: the Exec error is deliberately ignored —
// stale rows are also rejected by ConsumeWebAuthnChallenge's TTL check.
func CleanExpiredChallenges(db *DB) {
	fiveMinAgo := time.Now().Unix() - 300
	db.Conn.Exec(`DELETE FROM webauthn_challenges WHERE created_at < ?`, fiveMinAgo)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Agent operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// AgentCreate creates a new agent and returns the raw bearer token (shown once).
// The token embeds l1Raw (8 bytes) for vault routing + random bytes for identity;
// only the sha256 token hash is persisted. When scopes is "auto" or "",
// the agent becomes self-scoped: its own ID (hex form) is written back
// as its scope after the insert assigns the ID.
func AgentCreate(db *DB, name, scopes string, allAccess, admin bool, l1Raw []byte) (*Agent, string, error) {
	rawToken, tokenHash := MintToken(l1Raw)
	now := time.Now().UnixMilli()

	// SQLite has no native bool; store flags as 0/1.
	allAccessInt := 0
	if allAccess {
		allAccessInt = 1
	}
	adminInt := 0
	if admin {
		adminInt = 1
	}

	result, err := db.Conn.Exec(
		`INSERT INTO agents (token_hash, name, scopes, all_access, admin, created_at)
		 VALUES (?, ?, ?, ?, ?, ?)`,
		tokenHash, name, scopes, allAccessInt, adminInt, now,
	)
	if err != nil {
		return nil, "", err
	}

	id, _ := result.LastInsertId()
	a := &Agent{
		ID:        id,
		TokenHash: tokenHash,
		Name:      name,
		Scopes:    scopes,
		AllAccess: allAccess,
		Admin:     admin,
		CreatedAt: now,
	}

	// If scopes is "auto", assign this agent's own ID as its scope.
	// NOTE(review): the UPDATE's error is ignored — on failure the row
	// keeps the raw "auto"/"" scopes while the returned Agent reports
	// the hex scope; confirm that divergence is acceptable.
	if scopes == "auto" || scopes == "" {
		a.Scopes = a.ScopeHex()
		db.Conn.Exec(`UPDATE agents SET scopes = ? WHERE id = ?`, a.Scopes, id)
	}

	return a, rawToken, nil
}
|
||||
|
||||
// AgentGetByToken looks up an agent by sha256 token hash.
|
||||
func AgentGetByToken(db *DB, tokenHash string) (*Agent, error) {
|
||||
var a Agent
|
||||
var allAccess, admin int
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT id, token_hash, name, scopes, all_access, admin, created_at
|
||||
FROM agents WHERE token_hash = ?`, tokenHash,
|
||||
).Scan(&a.ID, &a.TokenHash, &a.Name, &a.Scopes, &allAccess, &admin, &a.CreatedAt)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.AllAccess = allAccess != 0
|
||||
a.Admin = admin != 0
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
// AgentGet returns an agent by ID.
|
||||
func AgentGet(db *DB, agentID int64) (*Agent, error) {
|
||||
var a Agent
|
||||
var allAccess, admin int
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT id, token_hash, name, scopes, all_access, admin, created_at
|
||||
FROM agents WHERE id = ?`, agentID,
|
||||
).Scan(&a.ID, &a.TokenHash, &a.Name, &a.Scopes, &allAccess, &admin, &a.CreatedAt)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.AllAccess = allAccess != 0
|
||||
a.Admin = admin != 0
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
// AgentList returns all agents. Never exposes token_hash.
|
||||
func AgentList(db *DB) ([]Agent, error) {
|
||||
rows, err := db.Conn.Query(
|
||||
`SELECT id, name, scopes, all_access, admin, created_at
|
||||
FROM agents ORDER BY id ASC`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var agents []Agent
|
||||
for rows.Next() {
|
||||
var a Agent
|
||||
var allAccess, admin int
|
||||
if err := rows.Scan(&a.ID, &a.Name, &a.Scopes, &allAccess, &admin, &a.CreatedAt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.AllAccess = allAccess != 0
|
||||
a.Admin = admin != 0
|
||||
agents = append(agents, a)
|
||||
}
|
||||
return agents, rows.Err()
|
||||
}
|
||||
|
||||
// AgentUpdate updates an agent's name, scopes, all_access, and admin flags.
|
||||
func AgentUpdate(db *DB, agentID int64, name, scopes string, allAccess, admin bool) error {
|
||||
allAccessInt := 0
|
||||
if allAccess {
|
||||
allAccessInt = 1
|
||||
}
|
||||
adminInt := 0
|
||||
if admin {
|
||||
adminInt = 1
|
||||
}
|
||||
_, err := db.Conn.Exec(
|
||||
`UPDATE agents SET name = ?, scopes = ?, all_access = ?, admin = ? WHERE id = ?`,
|
||||
name, scopes, allAccessInt, adminInt, agentID)
|
||||
return err
|
||||
}
|
||||
|
||||
// AgentDelete hard-deletes an agent.
|
||||
func AgentDelete(db *DB, agentID int64) error {
|
||||
_, err := db.Conn.Exec(`DELETE FROM agents WHERE id = ?`, agentID)
|
||||
return err
|
||||
}
|
||||
|
||||
// AgentCount returns the number of agents.
|
||||
func AgentCount(db *DB) (int, error) {
|
||||
var count int
|
||||
err := db.Conn.QueryRow(`SELECT COUNT(*) FROM agents`).Scan(&count)
|
||||
return count, err
|
||||
}
|
||||
|
||||
// EntryUpdateScopes updates only the scopes column of an entry.
|
||||
func EntryUpdateScopes(db *DB, entryID int64, scopes string) error {
|
||||
now := time.Now().UnixMilli()
|
||||
result, err := db.Conn.Exec(
|
||||
`UPDATE entries SET scopes = ?, updated_at = ? WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
scopes, now, entryID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
affected, _ := result.RowsAffected()
|
||||
if affected == 0 {
|
||||
return ErrNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Vault lock operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// VaultLockGet returns the vault lock state.
|
||||
func VaultLockGet(db *DB) (*VaultLock, error) {
|
||||
var vl VaultLock
|
||||
var locked int
|
||||
err := db.Conn.QueryRow(`SELECT locked, COALESCE(locked_reason,''), locked_at FROM vault_lock WHERE id = 1`).
|
||||
Scan(&locked, &vl.LockedReason, &vl.LockedAt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vl.Locked = locked != 0
|
||||
return &vl, nil
|
||||
}
|
||||
|
||||
// VaultLockSet sets the vault lock state.
|
||||
func VaultLockSet(db *DB, locked bool, reason string) error {
|
||||
lockedInt := 0
|
||||
lockedAt := int64(0)
|
||||
if locked {
|
||||
lockedInt = 1
|
||||
lockedAt = time.Now().UnixMilli()
|
||||
}
|
||||
_, err := db.Conn.Exec(`UPDATE vault_lock SET locked = ?, locked_reason = ?, locked_at = ? WHERE id = 1`,
|
||||
lockedInt, reason, lockedAt)
|
||||
return err
|
||||
}
|
||||
|
|
@ -1,324 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// testDB creates a temp database, migrates it, returns DB + cleanup.
|
||||
func testDB(t *testing.T) *DB {
|
||||
t.Helper()
|
||||
db, err := OpenDB(t.TempDir() + "/test.db")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := MigrateDB(db); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(func() { db.Close() })
|
||||
return db
|
||||
}
|
||||
|
||||
// testVaultKey returns a fixed 16-byte AES-128 key for tests: the byte
// sequence 1..8 repeated twice.
func testVaultKey() []byte {
	half := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	return append(append([]byte{}, half...), half...)
}
|
||||
|
||||
func TestEntryCreate_and_Get(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
entry := &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: "GitHub",
|
||||
VaultData: &VaultData{
|
||||
Title: "GitHub",
|
||||
Type: "credential",
|
||||
Fields: []VaultField{
|
||||
{Label: "username", Value: "octocat", Kind: "text"},
|
||||
{Label: "password", Value: "ghp_abc123", Kind: "password"},
|
||||
},
|
||||
URLs: []string{"https://github.com"},
|
||||
},
|
||||
}
|
||||
|
||||
if err := EntryCreate(db, vk, entry); err != nil {
|
||||
t.Fatalf("create: %v", err)
|
||||
}
|
||||
if entry.EntryID == 0 {
|
||||
t.Fatal("entry ID should be assigned")
|
||||
}
|
||||
if entry.Version != 1 {
|
||||
t.Errorf("initial version should be 1, got %d", entry.Version)
|
||||
}
|
||||
|
||||
got, err := EntryGet(db, vk, int64(entry.EntryID))
|
||||
if err != nil {
|
||||
t.Fatalf("get: %v", err)
|
||||
}
|
||||
if got.Title != "GitHub" {
|
||||
t.Errorf("title = %q, want GitHub", got.Title)
|
||||
}
|
||||
if got.VaultData == nil {
|
||||
t.Fatal("VaultData should be unpacked")
|
||||
}
|
||||
if len(got.VaultData.Fields) != 2 {
|
||||
t.Fatalf("expected 2 fields, got %d", len(got.VaultData.Fields))
|
||||
}
|
||||
if got.VaultData.Fields[0].Value != "octocat" {
|
||||
t.Errorf("username = %q, want octocat", got.VaultData.Fields[0].Value)
|
||||
}
|
||||
if got.VaultData.Fields[1].Value != "ghp_abc123" {
|
||||
t.Errorf("password = %q, want ghp_abc123", got.VaultData.Fields[1].Value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEntryUpdate_optimistic_locking(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
entry := &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: "Original",
|
||||
VaultData: &VaultData{Title: "Original", Type: "credential"},
|
||||
}
|
||||
EntryCreate(db, vk, entry)
|
||||
|
||||
// Update with correct version
|
||||
entry.Title = "Updated"
|
||||
entry.VaultData.Title = "Updated"
|
||||
if err := EntryUpdate(db, vk, entry); err != nil {
|
||||
t.Fatalf("update: %v", err)
|
||||
}
|
||||
if entry.Version != 2 {
|
||||
t.Errorf("version after update should be 2, got %d", entry.Version)
|
||||
}
|
||||
|
||||
// Update with stale version should fail
|
||||
entry.Version = 1 // stale
|
||||
entry.Title = "Stale"
|
||||
err := EntryUpdate(db, vk, entry)
|
||||
if err != ErrVersionConflict {
|
||||
t.Errorf("expected ErrVersionConflict, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEntryDelete_soft_delete(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
entry := &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: "ToDelete",
|
||||
VaultData: &VaultData{Title: "ToDelete", Type: "credential"},
|
||||
}
|
||||
EntryCreate(db, vk, entry)
|
||||
|
||||
if err := EntryDelete(db, int64(entry.EntryID)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Should not appear in list
|
||||
entries, err := EntryList(db, vk, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, e := range entries {
|
||||
if e.EntryID == entry.EntryID {
|
||||
t.Error("deleted entry should not appear in list")
|
||||
}
|
||||
}
|
||||
|
||||
// Direct get should still work but have DeletedAt set
|
||||
got, err := EntryGet(db, vk, int64(entry.EntryID))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got.DeletedAt == nil {
|
||||
t.Error("deleted entry should have DeletedAt set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEntryList_filters_by_parent(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
folder := &Entry{Type: TypeFolder, Title: "Work", VaultData: &VaultData{Title: "Work", Type: "folder"}}
|
||||
EntryCreate(db, vk, folder)
|
||||
|
||||
child := &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: "WorkGitHub",
|
||||
ParentID: folder.EntryID,
|
||||
VaultData: &VaultData{Title: "WorkGitHub", Type: "credential"},
|
||||
}
|
||||
EntryCreate(db, vk, child)
|
||||
|
||||
orphan := &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: "Personal",
|
||||
VaultData: &VaultData{Title: "Personal", Type: "credential"},
|
||||
}
|
||||
EntryCreate(db, vk, orphan)
|
||||
|
||||
parentID := int64(folder.EntryID)
|
||||
children, err := EntryList(db, vk, &parentID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(children) != 1 {
|
||||
t.Fatalf("expected 1 child, got %d", len(children))
|
||||
}
|
||||
if children[0].Title != "WorkGitHub" {
|
||||
t.Errorf("child title = %q", children[0].Title)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEntrySearchFuzzy(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
for _, title := range []string{"GitHub", "GitLab", "AWS Console"} {
|
||||
EntryCreate(db, vk, &Entry{
|
||||
Type: TypeCredential,
|
||||
Title: title,
|
||||
VaultData: &VaultData{Title: title, Type: "credential"},
|
||||
})
|
||||
}
|
||||
|
||||
results, err := EntrySearchFuzzy(db, vk, "Git")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(results) != 2 {
|
||||
t.Errorf("search for 'Git' should return 2 results, got %d", len(results))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEntryCount(t *testing.T) {
|
||||
db := testDB(t)
|
||||
vk := testVaultKey()
|
||||
|
||||
count, _ := EntryCount(db)
|
||||
if count != 0 {
|
||||
t.Errorf("empty db should have 0 entries, got %d", count)
|
||||
}
|
||||
|
||||
EntryCreate(db, vk, &Entry{
|
||||
Type: TypeCredential, Title: "One",
|
||||
VaultData: &VaultData{Title: "One", Type: "credential"},
|
||||
})
|
||||
EntryCreate(db, vk, &Entry{
|
||||
Type: TypeCredential, Title: "Two",
|
||||
VaultData: &VaultData{Title: "Two", Type: "credential"},
|
||||
})
|
||||
|
||||
count, _ = EntryCount(db)
|
||||
if count != 2 {
|
||||
t.Errorf("expected 2 entries, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuditLog_write_and_read(t *testing.T) {
|
||||
db := testDB(t)
|
||||
|
||||
AuditLog(db, &AuditEvent{
|
||||
Action: ActionCreate,
|
||||
Actor: ActorWeb,
|
||||
Title: "GitHub",
|
||||
IPAddr: "127.0.0.1",
|
||||
})
|
||||
AuditLog(db, &AuditEvent{
|
||||
Action: ActionRead,
|
||||
Actor: ActorAgent,
|
||||
Title: "GitHub",
|
||||
IPAddr: "10.0.0.1",
|
||||
})
|
||||
|
||||
events, err := AuditList(db, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(events) != 2 {
|
||||
t.Fatalf("expected 2 audit events, got %d", len(events))
|
||||
}
|
||||
// Both actions should be present (order depends on timestamp resolution)
|
||||
actions := map[string]bool{}
|
||||
for _, e := range events {
|
||||
actions[e.Action] = true
|
||||
}
|
||||
if !actions[ActionCreate] {
|
||||
t.Error("missing create action")
|
||||
}
|
||||
if !actions[ActionRead] {
|
||||
t.Error("missing read action")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSessionCreate_and_Get(t *testing.T) {
|
||||
db := testDB(t)
|
||||
|
||||
session, err := SessionCreate(db, 3600, ActorWeb)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if session.Token == "" {
|
||||
t.Fatal("session token should not be empty")
|
||||
}
|
||||
|
||||
got, err := SessionGet(db, session.Token)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got == nil {
|
||||
t.Fatal("session should exist")
|
||||
}
|
||||
if got.Actor != ActorWeb {
|
||||
t.Errorf("actor = %q, want web", got.Actor)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSessionGet_expired(t *testing.T) {
|
||||
db := testDB(t)
|
||||
|
||||
// Create session with negative TTL (guaranteed expired)
|
||||
session, _ := SessionCreate(db, -1, ActorWeb)
|
||||
|
||||
got, err := SessionGet(db, session.Token)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got != nil {
|
||||
t.Error("expired session should return nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWebAuthnCredential_store_and_list(t *testing.T) {
|
||||
db := testDB(t)
|
||||
|
||||
cred := &WebAuthnCredential{
|
||||
CredID: HexID(NewID()),
|
||||
Name: "YubiKey",
|
||||
PublicKey: []byte{1, 2, 3},
|
||||
CredentialID: []byte{4, 5, 6},
|
||||
PRFSalt: []byte{7, 8, 9},
|
||||
}
|
||||
if err := StoreWebAuthnCredential(db, cred); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
creds, err := GetWebAuthnCredentials(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(creds) != 1 {
|
||||
t.Fatalf("expected 1 credential, got %d", len(creds))
|
||||
}
|
||||
if creds[0].Name != "YubiKey" {
|
||||
t.Errorf("name = %q", creds[0].Name)
|
||||
}
|
||||
|
||||
count, _ := WebAuthnCredentialCount(db)
|
||||
if count != 1 {
|
||||
t.Errorf("count = %d, want 1", count)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// NewID generates a new int64 ID by partial-hashing a UUID.
|
||||
func NewID() int64 {
|
||||
u := uuid.New()
|
||||
h := sha256.Sum256(u[:])
|
||||
return int64(binary.BigEndian.Uint64(h[:8]) & 0x7FFFFFFFFFFFFFFF)
|
||||
}
|
||||
|
||||
// IDToHex converts an int64 ID to a 16-char lowercase hex string.
// For the non-negative IDs produced by NewID the result is exactly 16
// zero-padded hex digits; negative inputs would render with a sign
// (fmt's %x behavior for signed ints), so callers should only pass
// non-negative IDs.
func IDToHex(id int64) string {
	return fmt.Sprintf("%016x", id)
}
|
||||
|
||||
// HexToID parses a 16-char hex string into an int64 ID. It rejects input
// that is not exactly 16 hex characters.
func HexToID(s string) (int64, error) {
	if n := len(s); n != 16 {
		return 0, fmt.Errorf("invalid id: must be 16 hex chars, got %d", n)
	}
	raw, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid id: %w", err)
	}
	return int64(raw), nil
}
|
||||
|
||||
// VaultDBPath returns the path to a vault's SQLite file. The filename is
// the upper 32 bits of vault_id rendered as 8-char lowercase hex plus ".db".
func VaultDBPath(dataDir string, vaultID int64) string {
	name := fmt.Sprintf("%08x.db", uint32(vaultID>>32))
	return filepath.Join(dataDir, name)
}
|
||||
|
|
@ -1,45 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewID_unique(t *testing.T) {
|
||||
ids := make(map[int64]bool)
|
||||
for i := 0; i < 1000; i++ {
|
||||
id := NewID()
|
||||
if id <= 0 {
|
||||
t.Fatalf("ID should be positive, got %d", id)
|
||||
}
|
||||
if ids[id] {
|
||||
t.Fatalf("duplicate ID after %d iterations", i)
|
||||
}
|
||||
ids[id] = true
|
||||
}
|
||||
}
|
||||
|
||||
func TestIDToHex_and_back(t *testing.T) {
|
||||
id := NewID()
|
||||
hex := IDToHex(id)
|
||||
if len(hex) != 16 {
|
||||
t.Fatalf("hex should be 16 chars, got %d: %s", len(hex), hex)
|
||||
}
|
||||
back, err := HexToID(hex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if back != id {
|
||||
t.Errorf("roundtrip failed: %d -> %s -> %d", id, hex, back)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHexToID_invalid(t *testing.T) {
|
||||
_, err := HexToID("short")
|
||||
if err == nil {
|
||||
t.Error("should reject short hex")
|
||||
}
|
||||
_, err = HexToID("zzzzzzzzzzzzzzzz")
|
||||
if err == nil {
|
||||
t.Error("should reject non-hex chars")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,353 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DetectAndParse attempts to parse known password manager formats directly.
|
||||
// Returns (entries, true) if format recognized, (nil, false) if unknown.
|
||||
func DetectAndParse(content []byte) ([]VaultData, bool) {
|
||||
// If it's a zip, extract the first JSON file and parse that
|
||||
if content, ok := extractFromZip(content); ok {
|
||||
return DetectAndParse(content)
|
||||
}
|
||||
// Strip UTF-8 BOM if present (common in browser CSV exports)
|
||||
content = bytes.TrimPrefix(content, []byte{0xEF, 0xBB, 0xBF})
|
||||
// Try Bitwarden/generic JSON array with "items" key
|
||||
if entries, ok := parseBitwardenJSON(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
// Try Proton Pass JSON
|
||||
if entries, ok := parseProtonJSON(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
// Try Chrome/Firefox CSV
|
||||
if entries, ok := parseBrowserCSV(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// extractFromZip tries to read content as a zip archive and returns the first
// JSON or CSV file found inside. Used for Proton Pass exports (zip containing JSON).
func extractFromZip(content []byte) ([]byte, bool) {
	zr, err := zip.NewReader(bytes.NewReader(content), int64(len(content)))
	if err != nil {
		return nil, false // not a zip at all
	}
	for _, f := range zr.File {
		lower := strings.ToLower(f.Name)
		if !strings.HasSuffix(lower, ".json") && !strings.HasSuffix(lower, ".csv") {
			continue
		}
		rc, err := f.Open()
		if err != nil {
			continue
		}
		data, err := io.ReadAll(rc)
		rc.Close()
		if err == nil {
			return data, true
		}
	}
	return nil, false
}
|
||||
|
||||
// readCSV parses content with the given delimiter. Returns (records, true)
// only for well-formed input with a header plus at least one data row and a
// header of at least 3 columns.
func readCSV(content []byte, delim rune) ([][]string, bool) {
	reader := csv.NewReader(bytes.NewReader(content))
	reader.Comma = delim
	reader.LazyQuotes = true
	reader.TrimLeadingSpace = true

	records, err := reader.ReadAll()
	// Each condition is only evaluated if the previous one was false, so
	// records[0] is never touched on a parse error or an empty result.
	switch {
	case err != nil, len(records) < 2, len(records[0]) < 3:
		return nil, false
	}
	return records, true
}
|
||||
|
||||
// --- Chrome CSV ---
|
||||
// Columns: name,url,username,password (Chrome)
|
||||
// Columns: url,username,password,httpRealm,formActionOrigin,guid,timeCreated,timeLastUsed,timePasswordChanged (Firefox)
|
||||
func parseBrowserCSV(content []byte) ([]VaultData, bool) {
|
||||
// Try comma first, then semicolon (European locale exports)
|
||||
records, ok := readCSV(content, ',')
|
||||
if !ok {
|
||||
records, ok = readCSV(content, ';')
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
header := records[0]
|
||||
colIdx := map[string]int{}
|
||||
for i, h := range header {
|
||||
colIdx[strings.ToLower(strings.TrimSpace(h))] = i
|
||||
}
|
||||
|
||||
// Detect Chrome: name, url, username, password
|
||||
// Detect Firefox: url, username, password (+ extras)
|
||||
urlCol, hasURL := colIdx["url"]
|
||||
userCol, hasUser := colIdx["username"]
|
||||
passCol, hasPass := colIdx["password"]
|
||||
nameCol, hasName := colIdx["name"]
|
||||
// Firefox: timePasswordChanged (Unix microseconds)
|
||||
timeChangedCol, hasTimeChanged := colIdx["timepasswordchanged"]
|
||||
|
||||
if !hasURL || !hasUser || !hasPass {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var entries []VaultData
|
||||
for _, row := range records[1:] {
|
||||
if len(row) <= urlCol || len(row) <= passCol {
|
||||
continue
|
||||
}
|
||||
title := ""
|
||||
if hasName && len(row) > nameCol {
|
||||
title = row[nameCol]
|
||||
}
|
||||
if title == "" {
|
||||
title = row[urlCol]
|
||||
}
|
||||
entry := VaultData{
|
||||
Title: title,
|
||||
Type: "credential",
|
||||
Fields: []VaultField{
|
||||
{Label: "Username", Value: row[userCol], Kind: "text"},
|
||||
{Label: "Password", Value: row[passCol], Kind: "password"},
|
||||
},
|
||||
}
|
||||
if row[urlCol] != "" {
|
||||
entry.URLs = []string{row[urlCol]}
|
||||
}
|
||||
// Firefox stores timestamps as Unix microseconds
|
||||
if hasTimeChanged && len(row) > timeChangedCol && row[timeChangedCol] != "" {
|
||||
if us, err := strconv.ParseInt(row[timeChangedCol], 10, 64); err == nil && us > 0 {
|
||||
entry.SourceModified = us / 1_000_000 // microseconds → seconds
|
||||
}
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return entries, len(entries) > 0
|
||||
}
|
||||
|
||||
// --- Bitwarden JSON ---

// bitwardenExport is the top-level shape of a Bitwarden JSON export.
type bitwardenExport struct {
	Items []bitwardenItem `json:"items"`
}

// bitwardenItem is one vault item; exactly one of Login/Card/Identity is
// populated depending on Type.
type bitwardenItem struct {
	Name         string             `json:"name"`
	Type         int                `json:"type"` // 1=login, 2=note, 3=card, 4=identity
	Notes        string             `json:"notes"`
	RevisionDate string             `json:"revisionDate"` // RFC3339
	Login        *bitwardenLogin    `json:"login"`
	Card         *bitwardenCard     `json:"card"`
	Identity     *bitwardenIdentity `json:"identity"`
}

// bitwardenLogin holds the credential fields of a type-1 item.
type bitwardenLogin struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Totp     string `json:"totp"`
	URIs     []struct{ URI string `json:"uri"` } `json:"uris"`
}

// bitwardenCard holds the payment-card fields of a type-3 item.
type bitwardenCard struct {
	CardholderName string `json:"cardholderName"`
	Number         string `json:"number"`
	ExpMonth       string `json:"expMonth"`
	ExpYear        string `json:"expYear"`
	Code           string `json:"code"`
}

// bitwardenIdentity holds the personal-data fields of a type-4 item.
type bitwardenIdentity struct {
	FirstName  string `json:"firstName"`
	LastName   string `json:"lastName"`
	Email      string `json:"email"`
	Phone      string `json:"phone"`
	Address1   string `json:"address1"`
	City       string `json:"city"`
	State      string `json:"state"`
	PostalCode string `json:"postalCode"`
	Country    string `json:"country"`
}
|
||||
|
||||
// parseBitwardenJSON converts a Bitwarden export into VaultData entries.
// Returns (nil, false) if the content is not Bitwarden-shaped JSON (no
// "items" array). Items with an unrecognized type are kept with only
// title/notes populated.
func parseBitwardenJSON(content []byte) ([]VaultData, bool) {
	var bw bitwardenExport
	if err := json.Unmarshal(content, &bw); err != nil || len(bw.Items) == 0 {
		return nil, false
	}
	var entries []VaultData
	for _, item := range bw.Items {
		vd := VaultData{Title: item.Name, Notes: item.Notes}
		switch item.Type {
		case 1: // login
			vd.Type = "credential"
			if item.Login != nil {
				// Field order matters: downstream consumers expect
				// Username first, then Password.
				vd.Fields = append(vd.Fields, VaultField{Label: "Username", Value: item.Login.Username, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Password", Value: item.Login.Password, Kind: "password"})
				if item.Login.Totp != "" {
					vd.Fields = append(vd.Fields, VaultField{Label: "TOTP Seed", Value: item.Login.Totp, Kind: "totp"})
				}
				for _, u := range item.Login.URIs {
					if u.URI != "" {
						vd.URLs = append(vd.URLs, u.URI)
					}
				}
			}
		case 2: // note
			vd.Type = "note"
			// Notes become the note body; clear vd.Notes to avoid duplication.
			vd.Fields = append(vd.Fields, VaultField{Label: "Content", Value: item.Notes, Kind: "text"})
			vd.Notes = ""
		case 3: // card
			vd.Type = "card"
			if item.Card != nil {
				// Card number and CVV are marked L2 (extra-sensitive) here.
				vd.Fields = append(vd.Fields, VaultField{Label: "Cardholder", Value: item.Card.CardholderName, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Number", Value: item.Card.Number, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "CVV", Value: item.Card.Code, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "Expiry", Value: item.Card.ExpMonth + "/" + item.Card.ExpYear, Kind: "text"})
			}
		case 4: // identity
			vd.Type = "identity"
			if item.Identity != nil {
				id := item.Identity
				// Only add identity fields that are actually populated.
				addField := func(label, value string) {
					if value != "" {
						vd.Fields = append(vd.Fields, VaultField{Label: label, Value: value, Kind: "text"})
					}
				}
				addField("First Name", id.FirstName)
				addField("Last Name", id.LastName)
				addField("Email", id.Email)
				addField("Phone", id.Phone)
				addField("Address", id.Address1)
				addField("City", id.City)
				addField("State", id.State)
				addField("ZIP", id.PostalCode)
				addField("Country", id.Country)
			}
		}
		// Parse revisionDate → SourceModified
		if item.RevisionDate != "" {
			if t, err := time.Parse(time.RFC3339, item.RevisionDate); err == nil {
				vd.SourceModified = t.Unix()
			}
		}
		entries = append(entries, vd)
	}
	return entries, len(entries) > 0
}
|
||||
|
||||
// --- Proton Pass JSON ---
// Proton nests everything under item.data: type, metadata, content are all inside data.

// protonExport is the top-level shape of a Proton Pass export: vault ID → vault.
type protonExport struct {
	Vaults map[string]protonVault `json:"vaults"`
}

// protonVault is one vault's item list.
type protonVault struct {
	Items []protonItem `json:"items"`
}

// protonItem wraps the payload with its modification timestamps.
type protonItem struct {
	Data       protonItemData `json:"data"`
	ModifyTime int64          `json:"modifyTime"` // Unix seconds
	CreateTime int64          `json:"createTime"`
}

// protonItemData carries the item kind plus its metadata and content.
type protonItemData struct {
	Type     string        `json:"type"`
	Metadata protonMeta    `json:"metadata"`
	Content  protonContent `json:"content"`
}

// protonMeta is the item's display name and free-form note.
type protonMeta struct {
	Name string `json:"name"`
	Note string `json:"note"`
}

// protonContent is a union of all per-type fields; only the group matching
// the item's type is populated.
type protonContent struct {
	// login
	ItemUsername string   `json:"itemUsername"`
	ItemEmail    string   `json:"itemEmail"`
	Password     string   `json:"password"`
	Urls         []string `json:"urls"`
	TOTPUri      string   `json:"totpUri"`
	// card
	CardholderName     string `json:"cardholderName"`
	Number             string `json:"number"`
	VerificationNumber string `json:"verificationNumber"`
	ExpirationDate     string `json:"expirationDate"`
	// identity
	FullName string `json:"fullName"`
	Email    string `json:"email"`
	Phone    string `json:"phoneNumber"`
	// alias
	AliasEmail string `json:"aliasEmail"`
}
|
||||
|
||||
// parseProtonJSON converts a Proton Pass export into VaultData entries,
// flattening all vaults into one list. Returns (nil, false) if the content
// is not Proton-shaped JSON (no "vaults" map). Alias items are skipped;
// unknown types fall back to plain notes.
func parseProtonJSON(content []byte) ([]VaultData, bool) {
	var pe protonExport
	if err := json.Unmarshal(content, &pe); err != nil || len(pe.Vaults) == 0 {
		return nil, false
	}
	var entries []VaultData
	for _, vault := range pe.Vaults {
		for _, item := range vault.Items {
			d := item.Data
			vd := VaultData{
				Title: d.Metadata.Name,
				Notes: d.Metadata.Note,
				URLs:  d.Content.Urls,
			}
			// Pick best username: itemUsername, itemEmail, or email
			username := d.Content.ItemUsername
			if username == "" {
				username = d.Content.ItemEmail
			}
			switch d.Type {
			case "login":
				vd.Type = "credential"
				vd.Fields = append(vd.Fields, VaultField{Label: "Username", Value: username, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Password", Value: d.Content.Password, Kind: "password"})
				if d.Content.TOTPUri != "" {
					vd.Fields = append(vd.Fields, VaultField{Label: "TOTP Seed", Value: d.Content.TOTPUri, Kind: "totp"})
				}
			case "creditCard":
				vd.Type = "card"
				// Card number and CVV are marked L2 (extra-sensitive) here.
				vd.Fields = append(vd.Fields, VaultField{Label: "Cardholder", Value: d.Content.CardholderName, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Number", Value: d.Content.Number, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "CVV", Value: d.Content.VerificationNumber, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "Expiry", Value: d.Content.ExpirationDate, Kind: "text"})
			case "identity":
				vd.Type = "identity"
				// Only add identity fields that are actually populated.
				addF := func(l, v string) {
					if v != "" {
						vd.Fields = append(vd.Fields, VaultField{Label: l, Value: v, Kind: "text"})
					}
				}
				addF("Full Name", d.Content.FullName)
				addF("Email", d.Content.Email)
				addF("Phone", d.Content.Phone)
			case "alias":
				continue // Proton-specific email alias — not a vault entry
			case "note":
				vd.Type = "note"
				// The note body becomes a field; clear vd.Notes to avoid duplication.
				vd.Fields = append(vd.Fields, VaultField{Label: "Content", Value: d.Metadata.Note, Kind: "text"})
				vd.Notes = ""
			default:
				vd.Type = "note"
			}
			if item.ModifyTime > 0 {
				vd.SourceModified = item.ModifyTime
			}
			entries = append(entries, vd)
		}
	}
	return entries, len(entries) > 0
}
|
||||
|
|
@ -1,158 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDetectAndParse_ChromeCSV(t *testing.T) {
|
||||
csv := "name,url,username,password\nGitHub,https://github.com,octocat,hunter2\nAWS,https://aws.amazon.com,admin,s3cret\n"
|
||||
entries, ok := DetectAndParse([]byte(csv))
|
||||
if !ok {
|
||||
t.Fatal("should detect Chrome CSV")
|
||||
}
|
||||
if len(entries) != 2 {
|
||||
t.Fatalf("expected 2 entries, got %d", len(entries))
|
||||
}
|
||||
if entries[0].Title != "GitHub" {
|
||||
t.Errorf("title = %q", entries[0].Title)
|
||||
}
|
||||
if entries[0].Type != "credential" {
|
||||
t.Errorf("type = %q", entries[0].Type)
|
||||
}
|
||||
if len(entries[0].URLs) == 0 || entries[0].URLs[0] != "https://github.com" {
|
||||
t.Errorf("URL not parsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_FirefoxCSV(t *testing.T) {
|
||||
csv := "url,username,password,httpRealm,formActionOrigin,guid,timeCreated,timeLastUsed,timePasswordChanged\nhttps://example.com,user@example.com,pass123,,,,,,1700000000000000\n"
|
||||
entries, ok := DetectAndParse([]byte(csv))
|
||||
if !ok {
|
||||
t.Fatal("should detect Firefox CSV")
|
||||
}
|
||||
if len(entries) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(entries))
|
||||
}
|
||||
// Firefox stores timePasswordChanged as microseconds
|
||||
if entries[0].SourceModified != 1700000000 {
|
||||
t.Errorf("SourceModified = %d, want 1700000000", entries[0].SourceModified)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_BitwardenJSON(t *testing.T) {
|
||||
json := `{"items":[{"name":"GitHub","type":1,"login":{"username":"octocat","password":"p@ss","uris":[{"uri":"https://github.com"}]},"revisionDate":"2024-01-15T10:00:00Z"}]}`
|
||||
entries, ok := DetectAndParse([]byte(json))
|
||||
if !ok {
|
||||
t.Fatal("should detect Bitwarden JSON")
|
||||
}
|
||||
if len(entries) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(entries))
|
||||
}
|
||||
if entries[0].Title != "GitHub" {
|
||||
t.Errorf("title = %q", entries[0].Title)
|
||||
}
|
||||
if entries[0].SourceModified == 0 {
|
||||
t.Error("SourceModified should be parsed from revisionDate")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_BitwardenCard(t *testing.T) {
|
||||
json := `{"items":[{"name":"Amex","type":3,"card":{"cardholderName":"Johan","number":"378282246310005","code":"1234","expMonth":"09","expYear":"28"}}]}`
|
||||
entries, ok := DetectAndParse([]byte(json))
|
||||
if !ok {
|
||||
t.Fatal("should detect Bitwarden card")
|
||||
}
|
||||
if entries[0].Type != "card" {
|
||||
t.Errorf("type = %q, want card", entries[0].Type)
|
||||
}
|
||||
// Card number and CVV should be auto-flagged L2
|
||||
for _, f := range entries[0].Fields {
|
||||
if f.Label == "Number" && !f.L2 {
|
||||
t.Error("card number should be L2")
|
||||
}
|
||||
if f.Label == "CVV" && !f.L2 {
|
||||
t.Error("CVV should be L2")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_unknown_format(t *testing.T) {
|
||||
_, ok := DetectAndParse([]byte("this is not a known format"))
|
||||
if ok {
|
||||
t.Error("should not detect unknown format")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAutoL2Fields_labels checks per-field label matching: payment and
// identity labels are flagged L2 while agent-usable credentials stay L1.
func TestAutoL2Fields_labels(t *testing.T) {
	entries := []VaultData{
		{
			Title: "Bank",
			Fields: []VaultField{
				{Label: "Username", Value: "user", Kind: "text"},
				{Label: "Card Number", Value: "4111111111111111", Kind: "text"},
				{Label: "CVV", Value: "123", Kind: "text"},
				{Label: "SSN", Value: "123-45-6789", Kind: "text"},
				{Label: "API Key", Value: "sk_live_abc", Kind: "text"},
			},
		},
	}
	AutoL2Fields(entries)

	// "API Key" is deliberately expected false: API keys are L1 (agent-readable).
	expectations := map[string]bool{
		"Username":    false,
		"Card Number": true,
		"CVV":         true,
		"SSN":         true,
		"API Key":     false,
	}
	for _, f := range entries[0].Fields {
		want, ok := expectations[f.Label]
		if !ok {
			continue
		}
		if f.L2 != want {
			t.Errorf("field %q: L2=%v, want %v", f.Label, f.L2, want)
		}
	}
}
|
||||
|
||||
func TestAutoL2Fields_title_match_marks_all(t *testing.T) {
|
||||
entries := []VaultData{
|
||||
{
|
||||
Title: "Coinbase Wallet",
|
||||
Fields: []VaultField{
|
||||
{Label: "Email", Value: "me@example.com", Kind: "text"},
|
||||
{Label: "Password", Value: "secret", Kind: "password"},
|
||||
},
|
||||
},
|
||||
}
|
||||
AutoL2Fields(entries)
|
||||
|
||||
for _, f := range entries[0].Fields {
|
||||
if !f.L2 {
|
||||
t.Errorf("field %q should be L2 (title matched crypto exchange)", f.Label)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAutoL2Fields_multilingual checks that non-English government-ID labels
// are also flagged L2 by the built-in label list.
func TestAutoL2Fields_multilingual(t *testing.T) {
	entries := []VaultData{
		{
			Title: "Docs",
			Fields: []VaultField{
				{Label: "Paspoort", Value: "NL12345", Kind: "text"},     // Dutch
				{Label: "Führerschein", Value: "DE12345", Kind: "text"}, // German
				{Label: "身份证", Value: "CN12345", Kind: "text"},          // Chinese
				{Label: "パスポート", Value: "JP12345", Kind: "text"},        // Japanese
				{Label: "PESEL", Value: "PL12345", Kind: "text"},        // Polish
			},
		},
	}
	AutoL2Fields(entries)

	for _, f := range entries[0].Fields {
		if !f.L2 {
			t.Errorf("field %q should be auto-detected as L2", f.Label)
		}
	}
}
|
||||
|
|
@ -1,142 +0,0 @@
|
|||
package lib
|
||||
|
||||
import "strings"
|
||||
|
||||
// L2Labels contains substrings that mark a field as L2 (client-side encrypted
// only) during import. Matching is case-insensitive: if any substring appears
// in a field label, that field is flagged L2.
//
// L2 = things an AI agent should NEVER need. Personal identity, payment cards,
// government IDs. NOT API keys, SSH keys, TOTP — those are L1 (agent-readable).
//
// Contributing: add new terms anywhere in the list. Group by language or
// category, keep entries lowercase, and include a comment for the language.
var L2Labels = []string{
	// Card / payment
	// NOTE(review): "csv" presumably means the card security value (cf.
	// "cvv"/"cvc") but it also substring-matches labels like "CSV export" —
	// confirm intent.
	"cvv", "cvc", "csv", "security code", "card number", "card no",
	"pin code", "pin-code",

	// Banking
	"routing number", "account number", "iban", "swift", "sort code",

	// Government ID — English
	"ssn", "social security", "passport", "driver license", "driver's license",
	"driving license", "driving licence", "national id", "id card", "id number",
	"tax id", "identification number",

	// Dutch — BSN = burgerservicenummer
	"bsn", "burgerservicenummer", "rijbewijs", "paspoort", "identiteitskaart", "identiteitsbewijs",

	// German — SVN = Sozialversicherungsnummer, StID = Steuer-ID
	"sozialversicherungsnummer", "steuer-id", "steuernummer",
	"führerschein", "fuhrerschein", "sozialversicherung", "reisepass", "personalausweis",

	// French — NIR = numéro d'inscription au répertoire, CNI = carte nationale d'identité
	"nir", "cni", "numéro de sécurité", "numero de securite",
	"permis de conduire", "carte d'identit", "carte d identit", "passeport",

	// Spanish — DNI, NIE, NIF, CURP (Mexico)
	// NOTE(review): very short tokens ("dni", "nie", "nif", "nir", "cni")
	// substring-match unrelated labels — confirm the false-positive rate is
	// acceptable, since over-flagging only hides data from the agent.
	"dni", "nie", "nif", "curp",
	"licencia de conducir", "seguro social", "pasaporte", "tarjeta de identidad", "cédula", "cedula",

	// Portuguese — CPF, CNH = carteira nacional de habilitação, RNE = registro nacional de estrangeiros
	"cpf", "cnh", "rne",
	"carteira de motorista", "carteira de identidade", "passaporte",

	// Italian — CF = codice fiscale, tessera sanitaria
	"codice fiscale", "tessera sanitaria",
	"patente di guida", "passaporto", "carta d'identit", "carta d identit",

	// Chinese — 身份证号 = ID number, 社会保障号 = social security number
	"身份证", "护照", "驾照", "驾驶证", "社保", "社会保障号", "居民身份",

	// Japanese — マイナンバー = My Number, 運転免許証 = driver's license
	"パスポート", "免許", "マイナンバー", "運転免許証", "住民票",

	// Korean — 주민등록번호 = resident registration number
	"여권", "운전면허", "주민등록", "주민등록번호", "외국인등록",

	// Russian — ИНН = tax ID, СНИЛС = social insurance, ВУ = driver's license
	"паспорт", "водительск", "снилс", "инн",

	// Arabic — رقم الهوية = ID number, جواز سفر = passport
	"جواز سفر", "رخصة قيادة", "بطاقة هوية", "رقم الهوية",

	// Hindi — PAN = permanent account number, Aadhaar
	"पासपोर्ट", "आधार", "लाइसेंस", "pan card",

	// Turkish — TC Kimlik = national ID number
	"pasaport", "ehliyet", "kimlik numar", "tc kimlik", "nüfus",

	// Polish — PESEL = national ID, NIP = tax ID, dowód = ID card
	"pesel", "nip",
	"paszport", "prawo jazdy", "dowód osobisty", "dowod osobisty",

	// Swedish — pass = passport, samordningsnummer = coordination number
	"körkort", "personnummer", "samordningsnummer",

	// Thai — บัตรประชาชน = ID card, หนังสือเดินทาง = passport, ใบขับขี่ = driver's license
	"บัตรประชาชน", "หนังสือเดินทาง", "ใบขับขี่",

	// Vietnamese — CMND/CCCD = citizen ID, hộ chiếu = passport, GPLX = driver's license
	"cmnd", "cccd", "hộ chiếu", "ho chieu",
}
|
||||
|
||||
// L2Titles contains substrings matched against entry titles. If an entry's
// title matches, ALL fields in that entry are flagged L2.
// These are things a human needs but an agent never would.
var L2Titles = []string{
	// Recovery / backup codes — human-only fallback
	"backup code", "recovery code", "recovery key", "backup key",
	"restore code", "restore key", "reset code",

	// Crypto wallet seeds — human-only
	"seed phrase", "mnemonic", "recovery phrase", "wallet seed",

	// Pairing codes — one-time human setup
	"pairing code", "pairing key",

	// Crypto exchanges & wallets — entire record is sensitive
	"coinbase", "binance", "kraken", "gemini", "bitstamp", "bitfinex",
	"crypto.com", "kucoin", "bybit", "okx", "gate.io", "huobi", "htx",
	"bitget", "mexc", "upbit", "bithumb",
	// NOTE(review): "aa.com" (American Airlines?) looks out of place in this
	// crypto-exchange group — confirm it is intentional, and if so move it to
	// its own commented category.
	"aa.com",
	"metamask", "phantom", "ledger", "trezor", "exodus", "trust wallet",
	"electrum", "myetherwallet", "blockchain.com",
}
|
||||
|
||||
// AutoL2Fields scans all fields in each VaultData and sets L2=true if the
|
||||
// field label or entry title matches a sensitive pattern. Called after import.
|
||||
func AutoL2Fields(entries []VaultData) {
|
||||
for i := range entries {
|
||||
// Check title — if it matches, mark ALL fields L2
|
||||
titleLower := strings.ToLower(entries[i].Title)
|
||||
titleMatch := false
|
||||
for _, pat := range L2Titles {
|
||||
if strings.Contains(titleLower, pat) {
|
||||
titleMatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if titleMatch {
|
||||
for j := range entries[i].Fields {
|
||||
entries[i].Fields[j].L2 = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Check individual field labels
|
||||
for j := range entries[i].Fields {
|
||||
if entries[i].Fields[j].L2 {
|
||||
continue
|
||||
}
|
||||
lower := strings.ToLower(entries[i].Fields[j].Label)
|
||||
for _, pat := range L2Labels {
|
||||
if strings.Contains(lower, pat) {
|
||||
entries[i].Fields[j].L2 = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,308 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TelemetryConfig controls the optional telemetry reporter.
// All fields zero/empty = telemetry disabled.
type TelemetryConfig struct {
	FreqSeconds int    // interval between POSTs (0 = disabled)
	Host        string // telemetry endpoint, e.g. https://hq.clavitor.com/telemetry
	Token       string // Bearer token for auth (empty = no Authorization header)
	DataDir     string // vault data directory (scanned for *.db files)
	Version     string // build version string reported in each payload
}
|
||||
|
||||
// TelemetryPayload is the JSON body posted to the telemetry endpoint.
type TelemetryPayload struct {
	Version       string        `json:"version"`        // from TelemetryConfig.Version
	Hostname      string        `json:"hostname"`       // os.Hostname, best effort
	UptimeSeconds int64         `json:"uptime_seconds"` // seconds since StartTelemetry ran
	Timestamp     string        `json:"timestamp"`      // RFC3339, UTC
	System        SystemMetrics `json:"system"`
	Vaults        VaultMetrics  `json:"vaults"`
}
|
||||
|
||||
// SystemMetrics describes the host machine at collection time.
// The /proc-derived fields fall back (memory) or report zero on non-Linux.
type SystemMetrics struct {
	OS          string  `json:"os"`   // runtime.GOOS
	Arch        string  `json:"arch"` // runtime.GOARCH
	CPUs        int     `json:"cpus"` // runtime.NumCPU
	CPUPercent  float64 `json:"cpu_percent"` // non-idle CPU over a 500ms sample window
	MemTotalMB  int64   `json:"memory_total_mb"`
	MemUsedMB   int64   `json:"memory_used_mb"`
	DiskTotalMB int64   `json:"disk_total_mb"` // filesystem containing DataDir
	DiskUsedMB  int64   `json:"disk_used_mb"`
	Load1m      float64 `json:"load_1m"` // 1-minute load average
}
|
||||
|
||||
// VaultMetrics aggregates counts over all vault databases found in DataDir.
type VaultMetrics struct {
	Count        int   `json:"count"`         // number of vault .db files (node.db excluded)
	TotalSizeMB  int64 `json:"total_size_mb"` // sum of file sizes, integer MB
	TotalEntries int64 `json:"total_entries"` // non-deleted entries across all vaults
}
|
||||
|
||||
// StartTelemetry launches a background goroutine that periodically
// collects metrics and POSTs them to cfg.Host. Does nothing if
// FreqSeconds <= 0 or Host is empty.
//
// NOTE(review): the goroutine has no stop/shutdown path and runs for the
// life of the process — presumably fine for a daemon; confirm.
func StartTelemetry(cfg TelemetryConfig) {
	if cfg.FreqSeconds <= 0 || cfg.Host == "" {
		return
	}

	startTime := time.Now()
	interval := time.Duration(cfg.FreqSeconds) * time.Second
	client := &http.Client{Timeout: 10 * time.Second}

	log.Printf("Telemetry enabled: posting every %ds to %s", cfg.FreqSeconds, cfg.Host)

	go func() {
		// Post immediately on startup. After a successful ACK, wait the
		// normal interval; after any failed/un-ACK'd post, retry in 10s.
		// (The fast retry applies to every failure, not only startup.)
		retry := 10 * time.Second
		for {
			payload := CollectPayload(cfg, startTime)
			if postTelemetry(client, cfg.Host, cfg.Token, payload) {
				time.Sleep(interval)
			} else {
				log.Printf("telemetry: no ACK, retrying in %s", retry)
				time.Sleep(retry)
			}
		}
	}()
}
|
||||
|
||||
// CollectPayload gathers system and vault metrics into a TelemetryPayload.
|
||||
func CollectPayload(cfg TelemetryConfig, startTime time.Time) TelemetryPayload {
|
||||
hostname, _ := os.Hostname()
|
||||
|
||||
return TelemetryPayload{
|
||||
Version: cfg.Version,
|
||||
Hostname: hostname,
|
||||
UptimeSeconds: int64(time.Since(startTime).Seconds()),
|
||||
Timestamp: time.Now().UTC().Format(time.RFC3339),
|
||||
System: collectSystemMetrics(cfg.DataDir),
|
||||
Vaults: collectVaultMetrics(cfg.DataDir),
|
||||
}
|
||||
}
|
||||
|
||||
func postTelemetry(client *http.Client, host, token string, payload TelemetryPayload) bool {
|
||||
body, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
log.Printf("telemetry: marshal error: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", host, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
log.Printf("telemetry: request error: %v", err)
|
||||
return false
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
if token != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("telemetry: post error: %v", err)
|
||||
return false
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 300 {
|
||||
log.Printf("telemetry: unexpected status %d", resp.StatusCode)
|
||||
return false
|
||||
}
|
||||
|
||||
var ack struct{ OK bool `json:"ok"` }
|
||||
json.NewDecoder(resp.Body).Decode(&ack)
|
||||
return ack.OK
|
||||
}
|
||||
|
||||
func collectSystemMetrics(dataDir string) SystemMetrics {
|
||||
m := SystemMetrics{
|
||||
OS: runtime.GOOS,
|
||||
Arch: runtime.GOARCH,
|
||||
CPUs: runtime.NumCPU(),
|
||||
}
|
||||
|
||||
m.CPUPercent = readCPUPercent()
|
||||
m.MemTotalMB, m.MemUsedMB = readMemInfo()
|
||||
m.DiskTotalMB, m.DiskUsedMB = readDiskUsage(dataDir)
|
||||
m.Load1m = readLoadAvg()
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// readCPUPercent samples /proc/stat twice 500ms apart to compute real CPU usage.
|
||||
func readCPUPercent() float64 {
|
||||
s1 := readCPUStat()
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
s2 := readCPUStat()
|
||||
|
||||
total1, total2 := sumUint64(s1), sumUint64(s2)
|
||||
totalDiff := total2 - total1
|
||||
if totalDiff == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Field index 3 is idle time.
|
||||
idleDiff := s2[3] - s1[3]
|
||||
return float64(totalDiff-idleDiff) / float64(totalDiff) * 100
|
||||
}
|
||||
|
||||
// readCPUStat returns the jiffy counters from the aggregate "cpu " line of
// /proc/stat. On any failure it returns a zeroed 10-element slice so callers
// always get a usable (all-zero) sample.
func readCPUStat() []uint64 {
	data, err := os.ReadFile("/proc/stat")
	if err != nil {
		return make([]uint64, 10)
	}
	for _, line := range strings.Split(string(data), "\n") {
		if !strings.HasPrefix(line, "cpu ") {
			continue
		}
		raw := strings.Fields(line)[1:] // drop the "cpu" label
		vals := make([]uint64, len(raw))
		for i, field := range raw {
			vals[i], _ = strconv.ParseUint(field, 10, 64)
		}
		return vals
	}
	return make([]uint64, 10)
}
|
||||
|
||||
// sumUint64 returns the sum of all values (wrapping on overflow, as plain
// uint64 addition does).
func sumUint64(vals []uint64) uint64 {
	var total uint64
	for i := range vals {
		total += vals[i]
	}
	return total
}
|
||||
|
||||
// readMemInfo parses /proc/meminfo for total and used memory in MB.
// Falls back to Go runtime stats (process-level only, not system-wide)
// when /proc/meminfo is unavailable, e.g. on non-Linux hosts.
func readMemInfo() (totalMB, usedMB int64) {
	data, err := os.ReadFile("/proc/meminfo")
	if err != nil {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		return int64(ms.Sys / 1024 / 1024), int64(ms.Alloc / 1024 / 1024)
	}

	var totalKB, availKB int64
	for _, line := range strings.Split(string(data), "\n") {
		parts := strings.Fields(line)
		if len(parts) < 2 {
			continue
		}
		kb, _ := strconv.ParseInt(parts[1], 10, 64)
		switch parts[0] {
		case "MemTotal:":
			totalKB = kb
		case "MemAvailable:":
			availKB = kb
		}
	}

	// used = total - available, both reported by the kernel in kB.
	return totalKB / 1024, (totalKB - availKB) / 1024
}
|
||||
|
||||
// readDiskUsage returns total and used disk space, in MB, for the filesystem
// containing path (the current directory when path is empty). Returns 0,0 on
// error. "Used" counts root-reserved blocks, since it is total minus Bavail.
func readDiskUsage(path string) (totalMB, usedMB int64) {
	if path == "" {
		path = "."
	}

	var fs syscall.Statfs_t
	if err := syscall.Statfs(path, &fs); err != nil {
		return 0, 0
	}

	blockSize := uint64(fs.Bsize)
	total := fs.Blocks * blockSize
	free := fs.Bavail * blockSize
	return int64(total / 1024 / 1024), int64((total - free) / 1024 / 1024)
}
|
||||
|
||||
// readLoadAvg parses /proc/loadavg and returns the 1-minute load average,
// or 0 when the file is unavailable or malformed.
func readLoadAvg() float64 {
	data, err := os.ReadFile("/proc/loadavg")
	if err != nil {
		return 0
	}
	parts := strings.Fields(string(data))
	if len(parts) == 0 {
		return 0
	}
	load, _ := strconv.ParseFloat(parts[0], 64)
	return load
}
|
||||
|
||||
// collectVaultMetrics scans dataDir for .db files (skipping the non-vault
// node.db), summing their on-disk size and non-deleted entry counts.
// Unreadable files are silently skipped.
func collectVaultMetrics(dataDir string) VaultMetrics {
	if dataDir == "" {
		dataDir = "."
	}

	var m VaultMetrics

	matches, err := filepath.Glob(filepath.Join(dataDir, "*.db"))
	if err != nil {
		return m
	}

	for _, dbPath := range matches {
		base := filepath.Base(dbPath)
		// Skip non-vault databases.
		if base == "node.db" {
			continue
		}

		info, err := os.Stat(dbPath)
		if err != nil {
			continue
		}

		m.Count++
		m.TotalSizeMB += info.Size() / 1024 / 1024

		count := countEntries(dbPath)
		m.TotalEntries += count
	}

	// NOTE(review): the integer division above means a vault under 1 MB
	// reports TotalSizeMB 0; the previously-described "KB precision /
	// fractional MB" was never implemented — confirm whether it is wanted.
	return m
}
|
||||
|
||||
// countEntries opens the vault database read-only and returns the number of
// non-deleted entries, or 0 on any error (including an unregistered
// "sqlite3" driver).
func countEntries(dbPath string) int64 {
	dsn := fmt.Sprintf("file:%s?mode=ro&_journal_mode=WAL", dbPath)
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		return 0
	}
	defer db.Close()

	var n int64
	if err := db.QueryRow("SELECT COUNT(*) FROM entries WHERE deleted_at IS NULL").Scan(&n); err != nil {
		return 0
	}
	return n
}
|
||||
|
|
@ -1,106 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestCollectPayload checks that CollectPayload fills every section of the
// telemetry payload and that the payload survives a JSON roundtrip.
func TestCollectPayload(t *testing.T) {
	cfg := TelemetryConfig{
		FreqSeconds: 60,
		Host:        "http://localhost:9999",
		Token:       "test-token",
		DataDir:     t.TempDir(),
		Version:     "test-1.0",
	}
	// Pretend the process started five minutes ago so uptime is ~300s.
	startTime := time.Now().Add(-5 * time.Minute)

	payload := CollectPayload(cfg, startTime)

	if payload.Version != "test-1.0" {
		t.Errorf("version = %q, want test-1.0", payload.Version)
	}
	if payload.Hostname == "" {
		t.Error("hostname should not be empty")
	}
	if payload.UptimeSeconds < 299 {
		t.Errorf("uptime should be ~300s, got %d", payload.UptimeSeconds)
	}
	if payload.Timestamp == "" {
		t.Error("timestamp should not be empty")
	}
	if payload.System.OS == "" {
		t.Error("OS should not be empty")
	}
	if payload.System.CPUs < 1 {
		t.Errorf("CPUs should be >= 1, got %d", payload.System.CPUs)
	}
	if payload.System.MemTotalMB <= 0 {
		t.Errorf("memory total should be > 0, got %d", payload.System.MemTotalMB)
	}

	// JSON roundtrip
	data, err := json.Marshal(payload)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var decoded TelemetryPayload
	if err := json.Unmarshal(data, &decoded); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if decoded.Hostname != payload.Hostname {
		t.Error("hostname mismatch after roundtrip")
	}
}
|
||||
|
||||
// TestPostTelemetry starts the reporter against a local httptest server and
// verifies the Authorization header and the posted payload contents.
//
// NOTE(review): StartTelemetry exposes no shutdown, so the goroutine it
// starts outlives this test; the sleep-based synchronization is also
// timing-sensitive — confirm this is acceptable for CI.
func TestPostTelemetry(t *testing.T) {
	var mu sync.Mutex
	var received TelemetryPayload
	var authHeader string

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		defer mu.Unlock()
		authHeader = r.Header.Get("Authorization")
		body, _ := io.ReadAll(r.Body)
		json.Unmarshal(body, &received)
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	cfg := TelemetryConfig{
		FreqSeconds: 1,
		Host:        server.URL,
		Token:       "secret-token",
		DataDir:     t.TempDir(),
		Version:     "test-post",
	}

	StartTelemetry(cfg)
	time.Sleep(2 * time.Second) // CPU sampling takes 500ms, then POST

	mu.Lock()
	defer mu.Unlock()

	if authHeader != "Bearer secret-token" {
		t.Errorf("expected Bearer secret-token, got %q", authHeader)
	}
	if received.Version != "test-post" {
		t.Errorf("version = %q, want test-post", received.Version)
	}
	if received.Hostname == "" {
		t.Error("hostname should not be empty in posted payload")
	}
}
|
||||
|
||||
func TestTelemetryDisabled(t *testing.T) {
|
||||
// None of these should panic or start goroutines.
|
||||
StartTelemetry(TelemetryConfig{})
|
||||
StartTelemetry(TelemetryConfig{FreqSeconds: 0, Host: "http://example.com"})
|
||||
StartTelemetry(TelemetryConfig{FreqSeconds: 60, Host: ""})
|
||||
}
|
||||
|
|
@ -1,208 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/caddyserver/certmagic"
|
||||
"github.com/libdns/cloudflare"
|
||||
"github.com/mholt/acmez/v3/acme"
|
||||
)
|
||||
|
||||
// TLSConfig holds configuration for TLS.
type TLSConfig struct {
	Domain  string // hostname to obtain a certificate for, e.g. "use1.clavitor.ai"
	CFToken string // Cloudflare API token for the DNS-01 challenge
	DataDir string // directory where certificates are stored
	Email   string // ACME account email
}
|
||||
|
||||
// LoadTLSConfig reads TLS configuration from environment variables.
|
||||
func LoadTLSConfig() TLSConfig {
|
||||
return TLSConfig{
|
||||
Domain: os.Getenv("TLS_DOMAIN"),
|
||||
CFToken: os.Getenv("CF_API_TOKEN"),
|
||||
DataDir: os.Getenv("TLS_CERT_DIR"),
|
||||
Email: os.Getenv("TLS_EMAIL"),
|
||||
}
|
||||
}
|
||||
|
||||
// ListenAndServeTLS starts an HTTPS server.
|
||||
//
|
||||
// Three modes:
|
||||
// - TLS_DOMAIN + CF_API_TOKEN set → Let's Encrypt via Cloudflare DNS-01
|
||||
// - Neither set → self-signed certificate (generated on first run)
|
||||
// - Plain HTTP is never used
|
||||
func ListenAndServeTLS(addr string, handler http.Handler, cfg TLSConfig) error {
|
||||
if cfg.Domain != "" && cfg.CFToken != "" {
|
||||
return listenLetsEncrypt(addr, handler, cfg)
|
||||
}
|
||||
return listenSelfSigned(addr, handler, cfg)
|
||||
}
|
||||
|
||||
// listenLetsEncrypt uses certmagic for automatic Let's Encrypt certificates,
// solving the DNS-01 challenge through Cloudflare, then serves HTTPS.
//
// NOTE(review): this mutates the certmagic.DefaultACME package globals
// before calling certmagic.NewDefault(); calling it more than once (or
// concurrently) with different configs would leak settings between calls —
// presumably it runs once at startup; confirm.
func listenLetsEncrypt(addr string, handler http.Handler, cfg TLSConfig) error {
	// Production defaults when the operator supplied nothing.
	if cfg.DataDir == "" {
		cfg.DataDir = "/opt/clavitor/certs"
	}
	if cfg.Email == "" {
		cfg.Email = "ops@clavitor.ai"
	}

	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.Email = cfg.Email
	certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
		DNSManager: certmagic.DNSManager{
			DNSProvider: &cloudflare.Provider{
				APIToken: cfg.CFToken,
			},
		},
	}

	magic := certmagic.NewDefault()
	magic.Storage = &certmagic.FileStorage{Path: cfg.DataDir}

	// Blocks until the certificate is obtained (or fails).
	ctx := context.Background()
	if err := magic.ManageSync(ctx, []string{cfg.Domain}); err != nil {
		return fmt.Errorf("certmagic manage %s: %w", cfg.Domain, err)
	}

	tlsConfig := magic.TLSConfig()
	tlsConfig.NextProtos = []string{"h2", "http/1.1"} // advertise HTTP/2

	server := &http.Server{
		Addr:      addr,
		Handler:   handler,
		TLSConfig: tlsConfig,
	}

	log.Printf("Clavitor listening on https://0.0.0.0%s (%s)", addr, cfg.Domain)
	// Cert and key come from TLSConfig, hence the empty file arguments.
	return server.ListenAndServeTLS("", "")
}
|
||||
|
||||
// listenSelfSigned generates a self-signed certificate and serves HTTPS.
|
||||
// Cert is stored in DataDir (or working directory) so it persists across restarts.
|
||||
// The browser will show a certificate warning — the connection is still encrypted.
|
||||
func listenSelfSigned(addr string, handler http.Handler, cfg TLSConfig) error {
|
||||
certDir := cfg.DataDir
|
||||
if certDir == "" {
|
||||
certDir = "."
|
||||
}
|
||||
certPath := filepath.Join(certDir, "clavitor.crt")
|
||||
keyPath := filepath.Join(certDir, "clavitor.key")
|
||||
|
||||
if err := ensureSelfSignedCert(certPath, keyPath); err != nil {
|
||||
return fmt.Errorf("self-signed cert: %w", err)
|
||||
}
|
||||
|
||||
server := &http.Server{
|
||||
Addr: addr,
|
||||
Handler: handler,
|
||||
}
|
||||
|
||||
log.Printf("Clavitor listening on https://0.0.0.0%s (self-signed)", addr)
|
||||
return server.ListenAndServeTLS(certPath, keyPath)
|
||||
}
|
||||
|
||||
// ensureSelfSignedCert creates a self-signed TLS certificate at
// certPath/keyPath if one doesn't already exist. The certificate is valid
// for 10 years and covers localhost, the loopback addresses, and all local
// interface IPs so LAN access works. The key is ECDSA P-256; the key file
// is written 0600, the certificate 0644.
func ensureSelfSignedCert(certPath, keyPath string) error {
	if _, err := os.Stat(certPath); err == nil {
		if _, err := os.Stat(keyPath); err == nil {
			return nil // both files already exist — keep them
		}
	}

	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}

	// Random 128-bit serial. The error was previously discarded, which
	// would have produced a nil serial (and a confusing downstream failure)
	// if the system entropy source ever failed.
	serial, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		return err
	}

	tmpl := x509.Certificate{
		SerialNumber: serial,
		Subject:      pkix.Name{Organization: []string{"Clavitor"}, CommonName: "clavitor"},
		NotBefore:    time.Now().Add(-time.Hour), // backdated to tolerate clock skew
		NotAfter:     time.Now().Add(10 * 365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"localhost"},
		IPAddresses:  []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
	}

	// Add all local interface IPs so LAN access works; failure to enumerate
	// interfaces is non-fatal (the loopback SANs above still apply).
	if addrs, err := net.InterfaceAddrs(); err == nil {
		for _, a := range addrs {
			if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
				tmpl.IPAddresses = append(tmpl.IPAddresses, ipnet.IP)
			}
		}
	}

	certDER, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		return err
	}

	// Write cert and key as PEM.
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	keyDER, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return err
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})

	// Certificate is world-readable; the private key is owner-only.
	if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
		return err
	}
	if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil {
		return err
	}

	log.Printf("Generated self-signed TLS certificate: %s", certPath)
	return nil
}
|
||||
|
||||
// ChallengeError wraps ACME errors with actionable context.
|
||||
func ChallengeError(err error) string {
|
||||
if acmeErr, ok := err.(acme.Problem); ok {
|
||||
return fmt.Sprintf("ACME error: %s (type: %s)", acmeErr.Detail, acmeErr.Type)
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
// SelfSignedTLSConfig returns a tls.Config holding one freshly generated
// self-signed P-256 certificate (valid ~24h, localhost/127.0.0.1 only).
// Used by tests that need HTTPS without filesystem cert files; generation
// errors are deliberately ignored for that reason.
func SelfSignedTLSConfig() *tls.Config {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	serial, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))

	template := x509.Certificate{
		SerialNumber: serial,
		Subject:      pkix.Name{CommonName: "test"},
		NotBefore:    time.Now().Add(-time.Hour),
		NotAfter:     time.Now().Add(24 * time.Hour),
		DNSNames:     []string{"localhost"},
		IPAddresses:  []net.IP{net.IPv4(127, 0, 0, 1)},
	}
	der, _ := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)

	return &tls.Config{
		Certificates: []tls.Certificate{{
			Certificate: [][]byte{der},
			PrivateKey:  key,
		}},
	}
}
|
||||
|
|
@ -1,116 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// tokenPrefix identifies Clavitor agent bearer tokens.
	tokenPrefix = "cvt_"
	// tokenBytes is the decoded token body length: 8 bytes of L1 key
	// followed by 24 bytes of randomness.
	tokenBytes = 32
)

// base62 alphabet (digits + lowercase + uppercase) used for token encoding.
// Index order matters: base62Chars[0] ('0') doubles as the leading-zero marker.
const base62Chars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
// MintToken creates a new agent bearer token embedding the L1 key.
|
||||
// Returns the raw token (shown once) and its sha256 hex hash (stored in DB).
|
||||
// Token format: cvt_ + base62(l1Raw[8] + random[24])
|
||||
func MintToken(l1Raw []byte) (raw string, hash string) {
|
||||
buf := make([]byte, tokenBytes)
|
||||
copy(buf[:8], l1Raw)
|
||||
rand.Read(buf[8:])
|
||||
|
||||
raw = tokenPrefix + base62Encode(buf)
|
||||
hash = HashToken(raw)
|
||||
return
|
||||
}
|
||||
|
||||
// ParseToken extracts the L1 key (8 bytes, raw) from a cvt_ bearer token.
|
||||
// Returns l1Raw and the token hash for agent lookup.
|
||||
func ParseToken(raw string) (l1Raw []byte, hash string, err error) {
|
||||
if !strings.HasPrefix(raw, tokenPrefix) {
|
||||
return nil, "", fmt.Errorf("missing cvt_ prefix")
|
||||
}
|
||||
decoded, err := base62Decode(strings.TrimPrefix(raw, tokenPrefix))
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("invalid token encoding: %w", err)
|
||||
}
|
||||
if len(decoded) != tokenBytes {
|
||||
return nil, "", fmt.Errorf("invalid token length: got %d, want %d", len(decoded), tokenBytes)
|
||||
}
|
||||
l1Raw = decoded[:8]
|
||||
hash = HashToken(raw)
|
||||
return
|
||||
}
|
||||
|
||||
// HashToken returns the sha256 hex digest of a raw token string.
func HashToken(raw string) string {
	digest := sha256.Sum256([]byte(raw))
	return fmt.Sprintf("%x", digest[:])
}
|
||||
|
||||
// base62Encode encodes bytes as a base62 string.
|
||||
func base62Encode(data []byte) string {
|
||||
n := new(big.Int).SetBytes(data)
|
||||
base := big.NewInt(62)
|
||||
zero := big.NewInt(0)
|
||||
mod := new(big.Int)
|
||||
|
||||
var chars []byte
|
||||
for n.Cmp(zero) > 0 {
|
||||
n.DivMod(n, base, mod)
|
||||
chars = append(chars, base62Chars[mod.Int64()])
|
||||
}
|
||||
|
||||
// Preserve leading zeros
|
||||
for _, b := range data {
|
||||
if b != 0 {
|
||||
break
|
||||
}
|
||||
chars = append(chars, base62Chars[0])
|
||||
}
|
||||
|
||||
// Reverse
|
||||
for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 {
|
||||
chars[i], chars[j] = chars[j], chars[i]
|
||||
}
|
||||
|
||||
return string(chars)
|
||||
}
|
||||
|
||||
// base62Decode decodes a base62 string back to bytes.
|
||||
func base62Decode(s string) ([]byte, error) {
|
||||
n := new(big.Int)
|
||||
base := big.NewInt(62)
|
||||
|
||||
for _, c := range s {
|
||||
idx := strings.IndexRune(base62Chars, c)
|
||||
if idx < 0 {
|
||||
return nil, fmt.Errorf("invalid base62 character: %c", c)
|
||||
}
|
||||
n.Mul(n, base)
|
||||
n.Add(n, big.NewInt(int64(idx)))
|
||||
}
|
||||
|
||||
// Convert to fixed-size byte slice
|
||||
b := n.Bytes()
|
||||
|
||||
// Count leading zeros in the encoded string
|
||||
leadingZeros := 0
|
||||
for _, c := range s {
|
||||
if c == rune(base62Chars[0]) {
|
||||
leadingZeros++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Prepend zero bytes
|
||||
result := make([]byte, leadingZeros+len(b))
|
||||
copy(result[leadingZeros:], b)
|
||||
return result, nil
|
||||
}
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMintToken_roundtrip(t *testing.T) {
|
||||
l1 := []byte{0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44}
|
||||
raw, hash := MintToken(l1)
|
||||
|
||||
if !strings.HasPrefix(raw, "cvt_") {
|
||||
t.Fatalf("token missing cvt_ prefix: %s", raw)
|
||||
}
|
||||
if hash == "" {
|
||||
t.Fatal("hash is empty")
|
||||
}
|
||||
|
||||
l1Out, hashOut, err := ParseToken(raw)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseToken: %v", err)
|
||||
}
|
||||
|
||||
if len(l1Out) != 8 {
|
||||
t.Fatalf("l1 length: got %d, want 8", len(l1Out))
|
||||
}
|
||||
for i := range l1 {
|
||||
if l1[i] != l1Out[i] {
|
||||
t.Fatalf("l1 mismatch at byte %d: got %x, want %x", i, l1Out[i], l1[i])
|
||||
}
|
||||
}
|
||||
|
||||
if hashOut != hash {
|
||||
t.Fatalf("hash mismatch: got %s, want %s", hashOut, hash)
|
||||
}
|
||||
|
||||
if HashToken(raw) != hash {
|
||||
t.Fatal("HashToken mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMintToken_unique(t *testing.T) {
|
||||
l1 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
|
||||
r1, _ := MintToken(l1)
|
||||
r2, _ := MintToken(l1)
|
||||
if r1 == r2 {
|
||||
t.Fatal("two tokens should be different")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseToken_invalid(t *testing.T) {
|
||||
_, _, err := ParseToken("not_a_token")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for invalid token")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
// tokenMapSchema creates the token_map table: a flat token → vault_id
// registry. The token column is the primary key, so Register's
// INSERT OR REPLACE upserts by token.
const tokenMapSchema = `
CREATE TABLE IF NOT EXISTS token_map (
token TEXT PRIMARY KEY,
vault_id INTEGER NOT NULL
);
`

// TokenMap wraps node.db for token→vault_id lookups.
type TokenMap struct {
	db *sql.DB // SQLite handle opened by OpenTokenMap (WAL mode, 5s busy timeout)
}
|
||||
|
||||
// OpenTokenMap opens (or creates) the node.db token registry.
|
||||
func OpenTokenMap(dbPath string) (*TokenMap, error) {
|
||||
conn, err := sql.Open("sqlite3", dbPath+"?_journal_mode=WAL&_busy_timeout=5000")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open token map: %w", err)
|
||||
}
|
||||
if _, err := conn.Exec(tokenMapSchema); err != nil {
|
||||
conn.Close()
|
||||
return nil, fmt.Errorf("migrate token map: %w", err)
|
||||
}
|
||||
return &TokenMap{db: conn}, nil
|
||||
}
|
||||
|
||||
// Close closes the token map database. The TokenMap must not be used after
// Close returns.
func (tm *TokenMap) Close() error {
	return tm.db.Close()
}
|
||||
|
||||
// Register adds a token→vault_id mapping.
|
||||
func (tm *TokenMap) Register(token string, vaultID int64) error {
|
||||
_, err := tm.db.Exec(
|
||||
`INSERT OR REPLACE INTO token_map (token, vault_id) VALUES (?, ?)`,
|
||||
token, vaultID,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
// Lookup resolves a token to a vault_id. Returns 0, nil if not found.
|
||||
func (tm *TokenMap) Lookup(token string) (int64, error) {
|
||||
var vaultID int64
|
||||
err := tm.db.QueryRow(`SELECT vault_id FROM token_map WHERE token = ?`, token).Scan(&vaultID)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return 0, nil
|
||||
}
|
||||
return vaultID, err
|
||||
}
|
||||
|
||||
// Remove deletes a token mapping.
|
||||
func (tm *TokenMap) Remove(token string) error {
|
||||
_, err := tm.db.Exec(`DELETE FROM token_map WHERE token = ?`, token)
|
||||
return err
|
||||
}
|
||||
|
||||
// RemoveAllForVault removes all tokens for a vault.
|
||||
func (tm *TokenMap) RemoveAllForVault(vaultID int64) error {
|
||||
_, err := tm.db.Exec(`DELETE FROM token_map WHERE vault_id = ?`, vaultID)
|
||||
return err
|
||||
}
|
||||
|
|
@ -1,212 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HexID is an int64 that marshals to/from 16-char hex in JSON
// (via IDToHex / HexToID — presumably to keep 64-bit IDs exact for
// JavaScript clients; TODO confirm rationale).
type HexID int64
|
||||
|
||||
func (h HexID) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + IDToHex(int64(h)) + `"`), nil
|
||||
}
|
||||
|
||||
func (h *HexID) UnmarshalJSON(data []byte) error {
|
||||
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
|
||||
return fmt.Errorf("HexID must be a quoted string")
|
||||
}
|
||||
v, err := HexToID(string(data[1 : len(data)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*h = HexID(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// VaultField represents a single field within a vault entry.
type VaultField struct {
	Label   string `json:"label"`
	Value   string `json:"value"`
	Kind    string `json:"kind"` // text|password|totp|url|file
	Section string `json:"section,omitempty"`
	L2      bool   `json:"l2,omitempty"`   // legacy: true = L3 in new model
	Tier    int    `json:"tier,omitempty"` // 1=L1, 2=L2 (agent), 3=L3 (hardware)
}

// VaultFile represents an attached file.
type VaultFile struct {
	Name     string `json:"name"`
	MimeType string `json:"mime_type"`
	Size     int64  `json:"size"` // bytes; NOTE(review): relation to len(Data) not enforced here
	Data     []byte `json:"data"` // raw file content (base64 in JSON, per encoding/json []byte rules)
}

// VaultData is the JSON structure packed into Entry.Data.
// It is serialized, compressed, and encrypted before storage (see Entry.Data).
type VaultData struct {
	Title          string       `json:"title"`
	Type           string       `json:"type"`
	Fields         []VaultField `json:"fields"`
	URLs           []string     `json:"urls,omitempty"`
	Expires        string       `json:"expires,omitempty"` // YYYY-MM-DD
	Notes          string       `json:"notes,omitempty"`
	Files          []VaultFile  `json:"files,omitempty"`
	SourceModified int64        `json:"source_modified,omitempty"` // Unix seconds from import source; 0 = unknown
}
|
||||
|
||||
// Entry is the core data model — single table for all vault items.
// Folders are themselves entries (Type == TypeFolder) referenced by
// ParentID, forming an adjacency-list tree.
type Entry struct {
	EntryID   HexID  `json:"entry_id"`
	ParentID  HexID  `json:"parent_id"` // folder entry_id, or 0 for root
	Type      string `json:"type"`      // credential|note|identity|card|ssh_key|totp|folder|custom
	Title     string `json:"title"`     // plaintext for UI
	TitleIdx  []byte `json:"-"`         // HMAC-SHA256 blind index for search
	Data      []byte `json:"-"`         // packed: zstd + AES-256-GCM
	DataLevel int    `json:"data_level"` // 1=L1, 2=L2
	Scopes    string `json:"scopes"`     // comma-separated 4-char hex agent IDs, "" = owner-only
	CreatedAt int64  `json:"created_at"` // Unix seconds
	UpdatedAt int64  `json:"updated_at"` // Unix seconds
	Version   int    `json:"version"`    // optimistic locking
	DeletedAt *int64 `json:"deleted_at,omitempty"` // non-nil = soft-deleted at this Unix time
	Checksum  *int64 `json:"-"`          // reserved for tamper detection

	// Unpacked field (not stored directly, populated after decrypt)
	VaultData *VaultData `json:"data,omitempty"`
}
|
||||
|
||||
// Session represents an authenticated session.
type Session struct {
	Token     string `json:"token"`
	CreatedAt int64  `json:"created_at"` // Unix seconds
	ExpiresAt int64  `json:"expires_at"` // Unix seconds
	Actor     string `json:"actor"`      // web|extension|agent
}

// AuditEvent represents a security audit log entry.
type AuditEvent struct {
	EventID   HexID  `json:"event_id"`
	EntryID   HexID  `json:"entry_id,omitempty"`
	Title     string `json:"title,omitempty"` // snapshot of entry title
	Action    string `json:"action"`          // read|fill|ai_read|create|update|delete|import|export
	Actor     string `json:"actor"`           // web|extension|agent
	IPAddr    string `json:"ip_addr,omitempty"`
	CreatedAt int64  `json:"created_at"` // Unix seconds
}

// WebAuthnCredential stores a registered WebAuthn credential.
type WebAuthnCredential struct {
	CredID       HexID  `json:"cred_id"` // internal row ID
	Name         string `json:"name"`    // user-assigned label
	PublicKey    []byte `json:"public_key"`
	CredentialID []byte `json:"credential_id"` // raw WebAuthn credential ID from authenticator
	PRFSalt      []byte `json:"prf_salt"`
	SignCount    int    `json:"sign_count"` // authenticator signature counter (clone detection input)
	CreatedAt    int64  `json:"created_at"` // Unix seconds
}

// DB wraps the database connection.
type DB struct {
	Conn   *sql.DB
	DBPath string // filesystem path to the SQLite file
}
|
||||
|
||||
// Entry types — valid values for Entry.Type.
const (
	TypeCredential = "credential"
	TypeCard       = "card"
	TypeIdentity   = "identity"
	TypeNote       = "note"
	TypeSSHKey     = "ssh_key"
	TypeTOTP       = "totp"
	TypeFolder     = "folder"
	TypeCustom     = "custom"
)

// Data levels — valid values for Entry.DataLevel.
const (
	DataLevelL1 = 1 // Server-side encrypted (AI-readable)
	DataLevelL2 = 2 // Client-side only (WebAuthn PRF)
)

// Actor types — valid values for Session.Actor / AuditEvent.Actor.
const (
	ActorWeb       = "web"
	ActorExtension = "extension"
	ActorAgent     = "agent"
)

// Action types — valid values for AuditEvent.Action.
const (
	ActionRead         = "read"
	ActionFill         = "fill"
	ActionAIRead       = "ai_read"
	ActionCreate       = "create"
	ActionUpdate       = "update"
	ActionDelete       = "delete"
	ActionImport       = "import"
	ActionExport       = "export"
	ActionAgentCreate  = "agent_create"
	ActionAgentLock    = "agent_lock"
	ActionAgentUnlock  = "agent_unlock"
	ActionAgentRevoke  = "agent_revoke"
	ActionVaultLock    = "vault_lock"
	ActionVaultUnlock  = "vault_unlock"
	ActionIPViolation  = "ip_violation"  // request from a disallowed IP
	ActionRateExceeded = "rate_exceeded" // rate limit tripped
)
|
||||
|
||||
// Agent represents a bearer-token-authenticated agent with scoped access.
type Agent struct {
	ID        int64  `json:"id"`
	TokenHash string `json:"-"` // sha256 of raw cvt_ token — never exposed
	Name      string `json:"name"`
	Scopes    string `json:"scopes"`     // comma-separated 4-char hex, "" = no scopes
	AllAccess bool   `json:"all_access"` // bypass scope check, read all entries
	Admin     bool   `json:"admin"`      // can manage agents/scopes (with WebAuthn)
	CreatedAt int64  `json:"created_at"` // Unix seconds
}

// ScopeHex returns the agent's own ID as 4-char zero-padded hex.
// NOTE(review): IDs above 0xFFFF would render wider than 4 chars — assumed
// bounded elsewhere; confirm.
func (a *Agent) ScopeHex() string {
	return fmt.Sprintf("%04x", a.ID)
}

// ScopeOwner is the reserved scope for vault owner / human access.
// Entries with only this scope are invisible to agents.
const ScopeOwner = "0000"
|
||||
|
||||
// AgentCanAccess checks if an agent can see an entry.
|
||||
// nil agent = legacy L1 auth (vault owner) = full access.
|
||||
//
|
||||
// Rules (read top to bottom, first match wins):
|
||||
// - nil agent → true (vault owner via raw L1)
|
||||
// - agent.AllAccess → true
|
||||
// - set intersection → true if any agent scope appears in entry scopes
|
||||
// - 0000 is owner-only — no agent ever has scope 0000
|
||||
func AgentCanAccess(agent *Agent, entryScopes string) bool {
|
||||
if agent == nil {
|
||||
return true
|
||||
}
|
||||
if agent.AllAccess {
|
||||
return true
|
||||
}
|
||||
if entryScopes == "" {
|
||||
return false
|
||||
}
|
||||
for _, as := range strings.Split(agent.Scopes, ",") {
|
||||
for _, es := range strings.Split(entryScopes, ",") {
|
||||
if as != "" && as == es {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// VaultLock represents the vault-level lock state.
type VaultLock struct {
	Locked       bool   `json:"locked"`
	LockedReason string `json:"locked_reason,omitempty"` // human-readable cause
	LockedAt     int64  `json:"locked_at,omitempty"`     // Unix seconds; 0 when unlocked
}
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
package proxy
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"net"
|
||||
)
|
||||
|
||||
func newBufReader(conn net.Conn) *bufio.Reader {
|
||||
return bufio.NewReader(conn)
|
||||
}
|
||||
|
|
@ -1,452 +0,0 @@
|
|||
// Package proxy implements an HTTPS MITM proxy with LLM-based policy evaluation.
|
||||
//
|
||||
// Architecture:
|
||||
// - Agent sets HTTP_PROXY=http://localhost:19840 (or configured port)
|
||||
// - For plain HTTP: proxy injects Authorization/headers, forwards
|
||||
// - For HTTPS: proxy performs CONNECT tunnel, generates per-host TLS cert (signed by local CA)
|
||||
// - Before injecting credentials: optional LLM policy evaluation (intent check)
|
||||
//
|
||||
// Credential injection:
|
||||
// - Scans request for placeholder patterns: {{clavitor.entry_title.field_label}}
|
||||
// - Also injects via per-host credential rules stored in vault
|
||||
// - Tier check: L2 fields are never injected (identity/card data)
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config holds proxy configuration. The zero value is not usable: at
// minimum ListenAddr and DataDir must be set; LLM fields are only consulted
// when LLMEnabled is true.
type Config struct {
	// ListenAddr is the proxy listen address, e.g. "127.0.0.1:19840"
	ListenAddr string

	// DataDir is the vault data directory (for CA cert/key storage)
	DataDir string

	// VaultKey is the L1 decryption key (to read credentials for injection).
	// nil disables credential injection entirely (see injectCredentials).
	VaultKey []byte

	// DBPath is path to the vault SQLite database
	DBPath string

	// LLMEnabled enables LLM-based intent evaluation before credential injection
	LLMEnabled bool

	// LLMBaseURL is the LLM API base URL (OpenAI-compatible)
	LLMBaseURL string

	// LLMAPIKey is the API key for LLM requests
	LLMAPIKey string

	// LLMModel is the model to use for policy evaluation
	LLMModel string
}
|
||||
|
||||
// Proxy is the MITM proxy server. It implements http.Handler: plain HTTP
// requests are forwarded directly, CONNECT requests are TLS-intercepted
// with per-host certificates signed by a locally generated CA.
type Proxy struct {
	cfg    Config
	ca     *tls.Certificate  // CA keypair used to sign per-host leaf certs
	caCert *x509.Certificate // parsed CA certificate (signer)
	caKey  *rsa.PrivateKey   // CA private key (signer)
	certMu sync.Mutex        // guards certs (concurrent CONNECT handlers)
	certs  map[string]*tls.Certificate // hostname → generated cert (cache)
}
|
||||
|
||||
// New creates a new Proxy. Generates or loads the CA cert from DataDir.
|
||||
func New(cfg Config) (*Proxy, error) {
|
||||
p := &Proxy{
|
||||
cfg: cfg,
|
||||
certs: make(map[string]*tls.Certificate),
|
||||
}
|
||||
if err := p.loadOrCreateCA(); err != nil {
|
||||
return nil, fmt.Errorf("proxy CA: %w", err)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// ListenAndServe starts the proxy server. Blocks until stopped.
// The Proxy itself is the handler (see ServeHTTP); read/write timeouts are
// fixed at 30s per side.
func (p *Proxy) ListenAndServe() error {
	ln, err := net.Listen("tcp", p.cfg.ListenAddr)
	if err != nil {
		return fmt.Errorf("proxy listen %s: %w", p.cfg.ListenAddr, err)
	}
	log.Printf("proxy: listening on %s (LLM policy: %v)", p.cfg.ListenAddr, p.cfg.LLMEnabled)
	srv := &http.Server{
		Handler:      p,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
	}
	return srv.Serve(ln)
}
|
||||
|
||||
// ServeHTTP handles all incoming proxy requests.
|
||||
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == http.MethodConnect {
|
||||
p.handleCONNECT(w, r)
|
||||
return
|
||||
}
|
||||
p.handleHTTP(w, r)
|
||||
}
|
||||
|
||||
// handleHTTP handles plain HTTP proxy requests: strips hop-by-hop proxy
// headers, injects credentials, and forwards via a ReverseProxy.
func (p *Proxy) handleHTTP(w http.ResponseWriter, r *http.Request) {
	// Remove proxy-specific headers. RequestURI must be cleared because
	// outgoing client requests may not set it (net/http requirement).
	r.RequestURI = ""
	r.Header.Del("Proxy-Connection")
	r.Header.Del("Proxy-Authenticate")
	r.Header.Del("Proxy-Authorization")

	// Inject credentials if applicable
	if err := p.injectCredentials(r); err != nil {
		log.Printf("proxy: credential injection error for %s: %v", r.URL.Host, err)
		// Non-fatal: continue without injection
	}

	// Forward the request. The no-op Director relies on r.URL already being
	// absolute-form (proxy-style requests carry the full URL).
	// NOTE(review): a fresh ReverseProxy is allocated per request; hoisting
	// one instance would be cheaper — confirm no per-request state is needed.
	rp := &httputil.ReverseProxy{
		Director: func(req *http.Request) {},
	}
	rp.ServeHTTP(w, r)
}
|
||||
|
||||
// handleCONNECT handles HTTPS CONNECT tunnel requests.
|
||||
func (p *Proxy) handleCONNECT(w http.ResponseWriter, r *http.Request) {
|
||||
host := r.Host
|
||||
if !strings.Contains(host, ":") {
|
||||
host = host + ":443"
|
||||
}
|
||||
hostname, _, _ := net.SplitHostPort(host)
|
||||
|
||||
// Acknowledge the CONNECT
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Hijack the connection
|
||||
hijacker, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
log.Printf("proxy: CONNECT hijack not supported")
|
||||
return
|
||||
}
|
||||
clientConn, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
log.Printf("proxy: CONNECT hijack error: %v", err)
|
||||
return
|
||||
}
|
||||
defer clientConn.Close()
|
||||
|
||||
// Generate a certificate for this hostname
|
||||
cert, err := p.certForHost(hostname)
|
||||
if err != nil {
|
||||
log.Printf("proxy: cert generation failed for %s: %v", hostname, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Wrap client connection in TLS (using our MITM cert)
|
||||
tlsCfg := &tls.Config{
|
||||
Certificates: []tls.Certificate{*cert},
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
tlsClientConn := tls.Server(clientConn, tlsCfg)
|
||||
defer tlsClientConn.Close()
|
||||
if err := tlsClientConn.Handshake(); err != nil {
|
||||
log.Printf("proxy: TLS handshake failed for %s: %v", hostname, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Connect to real upstream
|
||||
upstreamConn, err := tls.Dial("tcp", host, &tls.Config{
|
||||
ServerName: hostname,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("proxy: upstream dial failed for %s: %v", host, err)
|
||||
return
|
||||
}
|
||||
defer upstreamConn.Close()
|
||||
|
||||
// Intercept HTTP traffic between client and upstream
|
||||
p.interceptHTTP(tlsClientConn, upstreamConn, hostname)
|
||||
}
|
||||
|
||||
// interceptHTTP reads HTTP requests from the client, injects credentials, forwards to upstream.
|
||||
func (p *Proxy) interceptHTTP(clientConn net.Conn, upstreamConn net.Conn, hostname string) {
|
||||
// Use Go's http.ReadRequest to parse the client's request
|
||||
clientReader := newBufReader(clientConn)
|
||||
|
||||
for {
|
||||
req, err := http.ReadRequest(clientReader)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
log.Printf("proxy: read request error for %s: %v", hostname, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Set the correct URL for upstream forwarding
|
||||
req.URL.Scheme = "https"
|
||||
req.URL.Host = hostname
|
||||
req.RequestURI = ""
|
||||
|
||||
// Inject credentials
|
||||
if err := p.injectCredentials(req); err != nil {
|
||||
log.Printf("proxy: credential injection error for %s: %v", hostname, err)
|
||||
}
|
||||
|
||||
// Forward to upstream
|
||||
if err := req.Write(upstreamConn); err != nil {
|
||||
log.Printf("proxy: upstream write error for %s: %v", hostname, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Read upstream response and forward to client
|
||||
upstreamReader := newBufReader(upstreamConn)
|
||||
resp, err := http.ReadResponse(upstreamReader, req)
|
||||
if err != nil {
|
||||
log.Printf("proxy: upstream read error for %s: %v", hostname, err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := resp.Write(clientConn); err != nil {
|
||||
log.Printf("proxy: client write error for %s: %v", hostname, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// injectCredentials scans the request for credential placeholders and injects them.
// Placeholder format: {{clavitor.entry_title.field_label}} in headers, URL, or body.
// Also applies host-based automatic injection rules from vault.
// L2 (identity/card) fields are NEVER injected.
//
// Current state: only the LLM policy gate is active; actual substitution is
// a stub (see TODO below) and this function returns nil without modifying
// the request.
func (p *Proxy) injectCredentials(r *http.Request) error {
	if p.cfg.VaultKey == nil {
		return nil // No vault key — skip injection
	}

	// Check for LLM policy evaluation. Policy errors fail open (allow);
	// an explicit denial aborts injection by returning an error.
	if p.cfg.LLMEnabled {
		allowed, reason, err := p.evaluatePolicy(r)
		if err != nil {
			log.Printf("proxy: LLM policy eval error: %v (allowing)", err)
		} else if !allowed {
			log.Printf("proxy: LLM policy DENIED %s %s: %s", r.Method, r.URL, reason)
			return fmt.Errorf("policy denied: %s", reason)
		}
	}

	// TODO: Implement placeholder substitution once vault DB integration is wired in.
	// Pattern: scan r.Header values, r.URL, r.Body for {{clavitor.TITLE.FIELD}}
	// Lookup entry by title (case-insensitive), get field by label, verify Tier != L2
	// Replace placeholder with decrypted field value.
	//
	// Auto-injection (host rules):
	// Vault entries can specify "proxy_inject_hosts": ["api.github.com"] in metadata
	// When a request matches, inject the entry's L1 fields as headers per a configured map.
	//
	// This stub returns nil — no injection until DB wiring is complete.
	return nil
}
|
||||
|
||||
// evaluatePolicy calls the configured LLM to evaluate whether this request
|
||||
// is consistent with the expected behavior of an AI agent (vs. exfiltration/abuse).
|
||||
func (p *Proxy) evaluatePolicy(r *http.Request) (allowed bool, reason string, err error) {
|
||||
if p.cfg.LLMBaseURL == "" || p.cfg.LLMAPIKey == "" {
|
||||
return true, "LLM not configured", nil
|
||||
}
|
||||
|
||||
// Build a concise request summary for the LLM
|
||||
summary := fmt.Sprintf("Method: %s\nHost: %s\nPath: %s\nContent-Type: %s",
|
||||
r.Method, r.Host, r.URL.Path,
|
||||
r.Header.Get("Content-Type"))
|
||||
|
||||
prompt := `You are a security policy evaluator for an AI agent credential proxy.
|
||||
|
||||
The following outbound HTTP request is about to have credentials injected and be forwarded.
|
||||
Evaluate whether this request is consistent with normal AI agent behavior (coding, API calls, deployment)
|
||||
vs. suspicious activity (credential exfiltration, unexpected destinations, data harvesting).
|
||||
|
||||
Request summary:
|
||||
` + summary + `
|
||||
|
||||
Respond with JSON only: {"allowed": true/false, "reason": "one sentence"}`
|
||||
|
||||
_ = prompt // Used when LLM call is implemented below
|
||||
|
||||
// TODO: Implement actual LLM call using cfg.LLMBaseURL + cfg.LLMAPIKey + cfg.LLMModel
|
||||
// For now: always allow (policy eval is opt-in, not blocking by default)
|
||||
// Real implementation: POST to /v1/chat/completions, parse JSON response
|
||||
return true, "policy evaluation not yet implemented", nil
|
||||
}
|
||||
|
||||
// certForHost returns a TLS certificate for the given hostname, generating one if needed.
|
||||
func (p *Proxy) certForHost(hostname string) (*tls.Certificate, error) {
|
||||
p.certMu.Lock()
|
||||
defer p.certMu.Unlock()
|
||||
|
||||
if cert, ok := p.certs[hostname]; ok {
|
||||
// Check if cert is still valid (> 1 hour remaining)
|
||||
if time.Until(cert.Leaf.NotAfter) > time.Hour {
|
||||
return cert, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Generate a new cert signed by our CA
|
||||
cert, err := p.generateCert(hostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.certs[hostname] = cert
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// generateCert generates a TLS cert for the given hostname, signed by the proxy CA.
|
||||
func (p *Proxy) generateCert(hostname string) (*tls.Certificate, error) {
|
||||
key, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("generate key: %w", err)
|
||||
}
|
||||
|
||||
serial, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: serial,
|
||||
Subject: pkix.Name{CommonName: hostname},
|
||||
DNSNames: []string{hostname},
|
||||
NotBefore: time.Now().Add(-time.Minute),
|
||||
NotAfter: time.Now().Add(24 * time.Hour),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
}
|
||||
|
||||
// Add IP SAN if hostname is an IP
|
||||
if ip := net.ParseIP(hostname); ip != nil {
|
||||
tmpl.IPAddresses = []net.IP{ip}
|
||||
tmpl.DNSNames = nil
|
||||
}
|
||||
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, p.caCert, &key.PublicKey, p.caKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create cert: %w", err)
|
||||
}
|
||||
|
||||
leaf, err := x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse cert: %w", err)
|
||||
}
|
||||
|
||||
tlsCert := &tls.Certificate{
|
||||
Certificate: [][]byte{certDER},
|
||||
PrivateKey: key,
|
||||
Leaf: leaf,
|
||||
}
|
||||
return tlsCert, nil
|
||||
}
|
||||
|
||||
// loadOrCreateCA loads the proxy CA cert/key from DataDir, or generates new ones.
//
// Load path: if {DataDir}/proxy/ca.crt exists and has at least 7 days of
// validity remaining, it (and ca.key) is loaded and used. Otherwise a fresh
// 4096-bit RSA CA valid for 1 year is generated and written to disk with
// 0600 permissions, overwriting any previous files.
func (p *Proxy) loadOrCreateCA() error {
	caDir := filepath.Join(p.cfg.DataDir, "proxy")
	if err := os.MkdirAll(caDir, 0700); err != nil {
		return err
	}
	certPath := filepath.Join(caDir, "ca.crt")
	keyPath := filepath.Join(caDir, "ca.key")

	// Try to load existing CA
	if _, err := os.Stat(certPath); err == nil {
		certPEM, err := os.ReadFile(certPath)
		if err != nil {
			return fmt.Errorf("read CA cert: %w", err)
		}
		keyPEM, err := os.ReadFile(keyPath)
		if err != nil {
			return fmt.Errorf("read CA key: %w", err)
		}
		tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
		if err != nil {
			return fmt.Errorf("parse CA keypair: %w", err)
		}
		// Parse and retain the leaf so expiry can be checked below.
		tlsCert.Leaf, err = x509.ParseCertificate(tlsCert.Certificate[0])
		if err != nil {
			return fmt.Errorf("parse CA cert: %w", err)
		}
		// Check expiry — regenerate if < 7 days left
		if time.Until(tlsCert.Leaf.NotAfter) < 7*24*time.Hour {
			log.Printf("proxy: CA cert expires soon (%s), regenerating", tlsCert.Leaf.NotAfter.Format("2006-01-02"))
		} else {
			p.ca = &tlsCert
			p.caCert = tlsCert.Leaf
			// NOTE(review): unchecked type assertion — panics if ca.key on
			// disk is not an RSA key (e.g. hand-replaced with ECDSA). Confirm
			// whether a graceful error is preferred.
			p.caKey = tlsCert.PrivateKey.(*rsa.PrivateKey)
			log.Printf("proxy: loaded CA cert (expires %s)", tlsCert.Leaf.NotAfter.Format("2006-01-02"))
			return nil
		}
	}

	// Generate new CA
	log.Printf("proxy: generating new CA cert...")
	key, err := rsa.GenerateKey(rand.Reader, 4096)
	if err != nil {
		return fmt.Errorf("generate CA key: %w", err)
	}

	// NOTE(review): rand.Int and x509.ParseCertificate errors below are
	// discarded; a randomness failure would yield a nil serial. Consider
	// checking (as generateCert should).
	serial, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	tmpl := &x509.Certificate{
		SerialNumber:          serial,
		Subject:               pkix.Name{CommonName: "Clavitor Proxy CA", Organization: []string{"Clavitor"}},
		NotBefore:             time.Now().Add(-time.Minute),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
		MaxPathLen:            0, // CA may only sign leaves, not intermediates
	}

	// Self-signed: template doubles as parent.
	certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return fmt.Errorf("create CA cert: %w", err)
	}
	leaf, _ := x509.ParseCertificate(certDER)

	// Write to disk
	// NOTE(review): pem.Encode errors are ignored here; a full disk could
	// leave a truncated ca.crt/ca.key behind. Confirm acceptable.
	certFile, err := os.OpenFile(certPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return fmt.Errorf("write CA cert: %w", err)
	}
	pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	certFile.Close()

	keyFile, err := os.OpenFile(keyPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return fmt.Errorf("write CA key: %w", err)
	}
	pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	keyFile.Close()

	p.ca = &tls.Certificate{Certificate: [][]byte{certDER}, PrivateKey: key, Leaf: leaf}
	p.caCert = leaf
	p.caKey = key

	log.Printf("proxy: CA cert generated at %s (install in OS trust store or pass --proxy-ca)", certPath)
	log.Printf("proxy: CA cert path: %s", certPath)
	return nil
}
|
||||
|
||||
// CACertPath returns the path to the proxy CA certificate (for user installation).
|
||||
func (p *Proxy) CACertPath() string {
|
||||
return filepath.Join(p.cfg.DataDir, "proxy", "ca.crt")
|
||||
}
|
||||
|
|
@ -3,7 +3,6 @@ package api
|
|||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
|
|
@ -341,8 +340,10 @@ func (h *Handlers) AuthRegisterComplete(w http.ResponseWriter, r *http.Request)
|
|||
CredentialID []byte `json:"credential_id"`
|
||||
PublicKey []byte `json:"public_key"`
|
||||
Name string `json:"name"`
|
||||
MasterKey []byte `json:"master_key"` // Full 32-byte PRF master key (derived with hardcoded salt)
|
||||
AuthenticatorAttachment string `json:"authenticator_attachment"` // platform, hybrid, cross-platform
|
||||
L1 []byte `json:"l1"` // 8 bytes — server slices L0 from this[:4]
|
||||
P1 []byte `json:"p1"` // 8 bytes — browser-derived WL3 lookup token
|
||||
WrappedL3 []byte `json:"wrapped_l3"` // opaque blob; server stores, never decrypts
|
||||
AuthenticatorAttachment string `json:"authenticator_attachment"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body")
|
||||
|
|
@ -353,16 +354,27 @@ func (h *Handlers) AuthRegisterComplete(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
// Server vetos: L2, L3, and the 32-byte master key never travel here.
|
||||
// The browser sends only what the server is allowed to see: L1 (for vault
|
||||
// routing + entry encryption), P1 (for WL3 lookup), and the opaque
|
||||
// wrapped_l3 blob. Validate hard.
|
||||
if len(req.L1) != 8 {
|
||||
ErrorResponse(w, http.StatusBadRequest, "invalid_l1", "L1 must be exactly 8 bytes")
|
||||
return
|
||||
}
|
||||
if len(req.P1) != 8 {
|
||||
ErrorResponse(w, http.StatusBadRequest, "invalid_p1", "P1 must be exactly 8 bytes")
|
||||
return
|
||||
}
|
||||
if len(req.WrappedL3) == 0 {
|
||||
ErrorResponse(w, http.StatusBadRequest, "missing_wrapped_l3", "wrapped_l3 is required")
|
||||
return
|
||||
}
|
||||
|
||||
db := h.db(r)
|
||||
if db == nil && len(req.PublicKey) > 0 {
|
||||
// L0 = first 4 bytes of master key for vault naming
|
||||
var l0 []byte
|
||||
if len(req.MasterKey) >= 4 {
|
||||
l0 = req.MasterKey[:4]
|
||||
} else {
|
||||
hash := sha256.Sum256(req.PublicKey)
|
||||
l0 = hash[:4]
|
||||
}
|
||||
if db == nil {
|
||||
// L0 = L1[:4] for vault file naming.
|
||||
l0 := req.L1[:4]
|
||||
dbName := "clavitor-" + base64UrlEncode(l0)
|
||||
dbPath := filepath.Join(h.Cfg.DataDir, dbName)
|
||||
newDB, err := lib.OpenDB(dbPath)
|
||||
|
|
@ -371,8 +383,8 @@ func (h *Handlers) AuthRegisterComplete(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
defer newDB.Close()
|
||||
if err := lib.MigrateDB(newDB); err != nil {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "db_migrate_failed", "Failed to initialize vault")
|
||||
if err := lib.InitSchema(newDB); err != nil {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "db_init_failed", "Failed to initialize vault")
|
||||
return
|
||||
}
|
||||
db = newDB
|
||||
|
|
@ -396,6 +408,26 @@ func (h *Handlers) AuthRegisterComplete(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
// Write WL3 credential file. The browser pre-computed P1 (HKDF over its
|
||||
// master key) and pre-wrapped L3 with its raw PRF output. The server is a
|
||||
// dumb store: it persists what arrives and derives nothing. wrapped_l3 is
|
||||
// opaque to the server — only a future device tap can unwrap it.
|
||||
wl3Entry := &lib.WL3Entry{
|
||||
P1: hex.EncodeToString(req.P1),
|
||||
L0: hex.EncodeToString(req.L1[:4]),
|
||||
WrappedL3: base64UrlEncode(req.WrappedL3),
|
||||
CredentialID: base64UrlEncode(req.CredentialID),
|
||||
PublicKey: base64UrlEncode(req.PublicKey),
|
||||
HomePOP: "", // empty in community; commercial sets this from edition config
|
||||
CreatedAt: time.Now().Unix(),
|
||||
}
|
||||
if err := lib.WL3Write(h.Cfg.WL3Dir, wl3Entry); err != nil {
|
||||
// SECURITY.md: visible failure, no silent fallback. Registration fails loud.
|
||||
log.Printf("WL3 write failed: %v", err)
|
||||
ErrorResponse(w, http.StatusInternalServerError, "wl3_write_failed", "Failed to persist credential lookup record")
|
||||
return
|
||||
}
|
||||
|
||||
// Get all registered credential types for response
|
||||
creds, _ := lib.GetWebAuthnCredentials(db)
|
||||
typeMap := make(map[string]bool)
|
||||
|
|
@ -423,13 +455,37 @@ func (h *Handlers) AuthLoginBegin(w http.ResponseWriter, r *http.Request) {
|
|||
db := h.db(r)
|
||||
if db == nil {
|
||||
var req struct {
|
||||
P1 string `json:"p1"` // Hex-encoded P1 (8 bytes) — preferred lookup
|
||||
L1 string `json:"l1"` // Base64-encoded L1 (8 bytes)
|
||||
L0 string `json:"l0"` // Base64-encoded L0 (4 bytes) - from localStorage hint
|
||||
CredentialIDs []string `json:"credential_ids"` // Credential IDs from localStorage hint
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err == nil {
|
||||
// Try P1 first (preferred — looks up the WL3 file to find L0 + home_pop).
|
||||
// The browser computes P1 client-side via HKDF over its master key and
|
||||
// sends it. The server NEVER derives P1 — it only receives and stores it.
|
||||
// P1 alone leaks no decryption capability; login still requires WebAuthn.
|
||||
if req.P1 != "" {
|
||||
p1Bytes, err := hex.DecodeString(req.P1)
|
||||
if err == nil && len(p1Bytes) == 8 {
|
||||
if entry, err := lib.WL3Read(h.Cfg.WL3Dir, p1Bytes); err == nil {
|
||||
l0Bytes, err := hex.DecodeString(entry.L0)
|
||||
if err == nil && len(l0Bytes) == 4 {
|
||||
dbPath := filepath.Join(h.Cfg.DataDir, "clavitor-"+base64UrlEncode(l0Bytes))
|
||||
openedDB, err := lib.OpenDB(dbPath)
|
||||
if err == nil {
|
||||
defer openedDB.Close()
|
||||
ctx := context.WithValue(r.Context(), ctxDB, openedDB)
|
||||
r = r.WithContext(ctx)
|
||||
db = openedDB
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try L1 first (from session)
|
||||
if req.L1 != "" {
|
||||
if db == nil && req.L1 != "" {
|
||||
l1Bytes, err := base64.RawURLEncoding.DecodeString(req.L1)
|
||||
if err == nil && len(l1Bytes) >= 4 {
|
||||
l0 := l1Bytes[:4] // Derive L0 from L1
|
||||
|
|
@ -662,6 +718,13 @@ func (h *Handlers) GetEntry(w http.ResponseWriter, r *http.Request) {
|
|||
ErrorResponse(w, http.StatusForbidden, "forbidden", "Access denied")
|
||||
return
|
||||
}
|
||||
// Per-agent unique-entries quota: repeated reads of the same entry are free,
|
||||
// only distinct entries count toward the limit. Hour-limit hits trigger the
|
||||
// strike-and-lock policy inside agentReadEntry.
|
||||
if !agentReadEntry(agent, lib.IDToHex(int64(entry.EntryID)), h.db(r), h.vk(r)) {
|
||||
ErrorResponse(w, http.StatusTooManyRequests, "rate_limited", "Agent rate limit exceeded")
|
||||
return
|
||||
}
|
||||
if actor == lib.ActorAgent && entry.VaultData != nil {
|
||||
stripL3Fields(entry.VaultData)
|
||||
}
|
||||
|
|
@ -1203,12 +1266,19 @@ func (h *Handlers) HandleListAlternates(w http.ResponseWriter, r *http.Request)
|
|||
ErrorResponse(w, http.StatusInternalServerError, "list_failed", "Failed to list alternates")
|
||||
return
|
||||
}
|
||||
// Filter by agent scope
|
||||
// Filter by agent scope and per-agent unique-entries quota.
|
||||
// Each alternate counts as a distinct entry — but if the agent has fetched
|
||||
// some of them before in the current window, those are free.
|
||||
var result []lib.Entry
|
||||
for _, e := range entries {
|
||||
if lib.AgentCanAccess(agent, e.Scopes) {
|
||||
result = append(result, e)
|
||||
if !lib.AgentCanAccess(agent, e.Scopes) {
|
||||
continue
|
||||
}
|
||||
if !agentReadEntry(agent, lib.IDToHex(int64(e.EntryID)), h.db(r), h.vk(r)) {
|
||||
ErrorResponse(w, http.StatusTooManyRequests, "rate_limited", "Agent rate limit exceeded")
|
||||
return
|
||||
}
|
||||
result = append(result, e)
|
||||
}
|
||||
JSONResponse(w, http.StatusOK, map[string]any{"alternates": result})
|
||||
}
|
||||
|
|
@ -1243,6 +1313,12 @@ func (h *Handlers) MatchURL(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
for _, u := range entry.VaultData.URLs {
|
||||
if strings.Contains(u, domain) || strings.Contains(domain, extractDomain(u)) {
|
||||
// Per-agent unique-entries quota: matching this entry counts as
|
||||
// reading it. Repeats inside the same window are free.
|
||||
if !agentReadEntry(agent, lib.IDToHex(int64(entry.EntryID)), h.db(r), h.vk(r)) {
|
||||
ErrorResponse(w, http.StatusTooManyRequests, "rate_limited", "Agent rate limit exceeded")
|
||||
return
|
||||
}
|
||||
// Extract username for grouping
|
||||
username := ""
|
||||
for _, f := range entry.VaultData.Fields {
|
||||
|
|
@ -1342,10 +1418,12 @@ func (h *Handlers) HandleCreateAgent(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
var req struct {
|
||||
Name string `json:"name"`
|
||||
Scopes string `json:"scopes"`
|
||||
AllAccess bool `json:"all_access"`
|
||||
Admin bool `json:"admin"`
|
||||
Name string `json:"name"`
|
||||
Scopes string `json:"scopes"`
|
||||
AllAccess bool `json:"all_access"`
|
||||
Admin bool `json:"admin"`
|
||||
RateLimitMinute int `json:"rate_limit_minute"` // 0 → use safe default
|
||||
RateLimitHour int `json:"rate_limit_hour"` // 0 → use safe default
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
ErrorResponse(w, http.StatusBadRequest, "invalid_json", "Invalid request body")
|
||||
|
|
@ -1355,10 +1433,27 @@ func (h *Handlers) HandleCreateAgent(w http.ResponseWriter, r *http.Request) {
|
|||
ErrorResponse(w, http.StatusBadRequest, "missing_name", "Name is required")
|
||||
return
|
||||
}
|
||||
// Safe defaults for the unique-entries quota. Real agent flows touch 1–3
|
||||
// credentials per task; defaults catch a harvester within minutes without
|
||||
// breaking normal use. Owner can override per-agent at creation or via
|
||||
// HandleUpdateAgent later.
|
||||
if req.RateLimitMinute == 0 {
|
||||
req.RateLimitMinute = 3
|
||||
}
|
||||
if req.RateLimitHour == 0 {
|
||||
req.RateLimitHour = 10
|
||||
}
|
||||
// DESIGN NOTE: Empty scopes with all_access=false is intentional.
|
||||
// This allows users to create a "blocked" agent that cannot access any entries,
|
||||
// effectively quarantining a rogue agent without deleting it.
|
||||
agent, err := lib.AgentCreate(h.db(r), h.vk(r), h.l0(r), "", req.Name, req.Scopes, req.AllAccess, req.Admin)
|
||||
agent, err := lib.AgentCreate(h.db(r), h.vk(r), h.l0(r), lib.AgentCreateOpts{
|
||||
Name: req.Name,
|
||||
Scopes: req.Scopes,
|
||||
AllAccess: req.AllAccess,
|
||||
Admin: req.Admin,
|
||||
RateLimit: req.RateLimitMinute,
|
||||
RateLimitHour: req.RateLimitHour,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "create_failed", "Failed to create agent")
|
||||
return
|
||||
|
|
@ -1394,14 +1489,35 @@ func (h *Handlers) HandleListAgents(w http.ResponseWriter, r *http.Request) {
|
|||
if e.Type != lib.TypeAgent || e.VaultData == nil {
|
||||
continue
|
||||
}
|
||||
// Split allowed_ips into a list for the UI.
|
||||
var ipList []string
|
||||
if e.VaultData.AllowedIPs != "" {
|
||||
for _, ip := range strings.Split(e.VaultData.AllowedIPs, ",") {
|
||||
ip = strings.TrimSpace(ip)
|
||||
if ip != "" {
|
||||
ipList = append(ipList, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
status := "active"
|
||||
if e.VaultData.Locked {
|
||||
status = "locked"
|
||||
}
|
||||
agents = append(agents, map[string]any{
|
||||
"agent_id": e.VaultData.AgentID,
|
||||
"name": e.Title,
|
||||
"scopes": e.VaultData.Scopes,
|
||||
"all_access": e.VaultData.AllAccess,
|
||||
"admin": e.VaultData.Admin,
|
||||
"entry_id": e.EntryID,
|
||||
"created_at": e.CreatedAt,
|
||||
"id": lib.IDToHex(int64(e.EntryID)), // for /api/agents/{id}/* paths
|
||||
"entry_id": e.EntryID, // legacy numeric, kept for compat
|
||||
"agent_id": e.VaultData.AgentID,
|
||||
"name": e.Title,
|
||||
"scopes": e.VaultData.Scopes,
|
||||
"all_access": e.VaultData.AllAccess,
|
||||
"admin": e.VaultData.Admin,
|
||||
"status": status,
|
||||
"locked": e.VaultData.Locked,
|
||||
"last_strike_at": e.VaultData.LastStrikeAt,
|
||||
"rate_limit_minute": e.VaultData.RateLimit,
|
||||
"rate_limit_hour": e.VaultData.RateLimitHour,
|
||||
"ip_whitelist": ipList,
|
||||
"created_at": e.CreatedAt,
|
||||
})
|
||||
}
|
||||
if agents == nil {
|
||||
|
|
@ -1485,7 +1601,9 @@ func (h *Handlers) HandleUpdateAgent(w http.ResponseWriter, r *http.Request) {
|
|||
if req.RateLimitMinute > 0 {
|
||||
entry.VaultData.RateLimit = req.RateLimitMinute
|
||||
}
|
||||
// Note: RateLimitHour is not stored separately in current model
|
||||
if req.RateLimitHour > 0 {
|
||||
entry.VaultData.RateLimitHour = req.RateLimitHour
|
||||
}
|
||||
// Re-encrypt and update entry
|
||||
if err := lib.EntryUpdate(h.db(r), h.vk(r), entry); err != nil {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "update_failed", "Failed to update agent")
|
||||
|
|
@ -1498,6 +1616,41 @@ func (h *Handlers) HandleUpdateAgent(w http.ResponseWriter, r *http.Request) {
|
|||
JSONResponse(w, http.StatusOK, map[string]string{"status": "updated"})
|
||||
}
|
||||
|
||||
// HandleUnlockAgent clears the locked state on an agent. Requires admin token
|
||||
// (PRF tap). Resets LastStrikeAt so the agent gets a clean strike clock.
|
||||
func (h *Handlers) HandleUnlockAgent(w http.ResponseWriter, r *http.Request) {
|
||||
if h.requireAdmin(w, r) {
|
||||
return
|
||||
}
|
||||
entryID, err := lib.HexToID(chi.URLParam(r, "id"))
|
||||
if err != nil {
|
||||
ErrorResponse(w, http.StatusBadRequest, "invalid_id", "Invalid entry ID")
|
||||
return
|
||||
}
|
||||
entry, err := lib.EntryGet(h.db(r), h.vk(r), entryID)
|
||||
if err != nil {
|
||||
if err == lib.ErrNotFound {
|
||||
ErrorResponse(w, http.StatusNotFound, "not_found", "Agent not found")
|
||||
} else {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "get_failed", "Failed to retrieve agent")
|
||||
}
|
||||
return
|
||||
}
|
||||
if entry.Type != lib.TypeAgent {
|
||||
ErrorResponse(w, http.StatusNotFound, "not_found", "Agent not found")
|
||||
return
|
||||
}
|
||||
if err := lib.AgentUnlock(h.db(r), h.vk(r), lib.HexID(entryID)); err != nil {
|
||||
ErrorResponse(w, http.StatusInternalServerError, "unlock_failed", "Failed to unlock agent")
|
||||
return
|
||||
}
|
||||
lib.AuditLog(h.db(r), &lib.AuditEvent{
|
||||
Action: "agent_unlocked", Actor: ActorFromContext(r.Context()),
|
||||
IPAddr: realIP(r), Title: entry.Title,
|
||||
})
|
||||
JSONResponse(w, http.StatusOK, map[string]string{"status": "unlocked"})
|
||||
}
|
||||
|
||||
func (h *Handlers) HandleUpdateEntryScopes(w http.ResponseWriter, r *http.Request) {
|
||||
if h.requireAdmin(w, r) {
|
||||
return
|
||||
|
|
|
|||
|
|
@ -71,8 +71,8 @@ func newTestClient(t *testing.T) *tc {
|
|||
if err != nil {
|
||||
t.Fatalf("opendb: %v", err)
|
||||
}
|
||||
if err := lib.MigrateDB(db); err != nil {
|
||||
t.Fatalf("migrate: %v", err)
|
||||
if err := lib.InitSchema(db); err != nil {
|
||||
t.Fatalf("init schema: %v", err)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
|
|
@ -103,33 +103,6 @@ func (c *tc) req(method, path string, body any) *http.Response {
|
|||
return resp
|
||||
}
|
||||
|
||||
// reqAgent sends a request using a cvt_ wire token.
|
||||
func (c *tc) reqAgent(credential, method, path string, body any) *http.Response {
|
||||
c.t.Helper()
|
||||
|
||||
// Convert client credential (0x01) → wire token (0x00)
|
||||
wire, _, _, err := lib.CredentialToWire(credential)
|
||||
if err != nil {
|
||||
c.t.Fatalf("CredentialToWire: %v", err)
|
||||
}
|
||||
|
||||
var r io.Reader
|
||||
if body != nil {
|
||||
b, _ := json.Marshal(body)
|
||||
r = bytes.NewReader(b)
|
||||
}
|
||||
req, _ := http.NewRequest(method, c.srv.URL+path, r)
|
||||
if body != nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
req.Header.Set("Authorization", "Bearer "+wire)
|
||||
resp, err := c.srv.Client().Do(req)
|
||||
if err != nil {
|
||||
c.t.Fatalf("req %s %s: %v", method, path, err)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// reqNoAuth sends an unauthenticated request.
|
||||
func (c *tc) reqNoAuth(method, path string, body any) *http.Response {
|
||||
c.t.Helper()
|
||||
|
|
|
|||
|
|
@ -2,28 +2,17 @@ package api
|
|||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
//go:embed importer-mappings.json
|
||||
var importerMappingsJSON []byte
|
||||
|
||||
// HandleMappings serves the importer field mappings JSON.
|
||||
// This allows the frontend to dynamically load field mappings
|
||||
// without hardcoding them in JavaScript.
|
||||
// HandleMappings serves the importer field mappings JSON to the browser.
|
||||
// All import parsing happens client-side; this endpoint exists only to
|
||||
// give the JS parsers their format spec.
|
||||
func (h *Handlers) HandleMappings(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Cache-Control", "public, max-age=3600")
|
||||
w.Write(importerMappingsJSON)
|
||||
}
|
||||
|
||||
// LoadMappings parses the embedded mappings.
|
||||
// Used internally if Go code needs to access the mappings.
|
||||
func LoadMappings() (map[string]interface{}, error) {
|
||||
var mappings map[string]interface{}
|
||||
if err := json.Unmarshal(importerMappingsJSON, &mappings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mappings, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -155,14 +155,26 @@ func L1Middleware(dataDir string) func(http.Handler) http.Handler {
|
|||
return
|
||||
}
|
||||
|
||||
// Per-agent rate limiting
|
||||
if agent.RateLimit > 0 {
|
||||
if !agentRateLimiter.allow(agent.AgentID, agent.RateLimit) {
|
||||
ErrorResponse(w, http.StatusTooManyRequests, "rate_limited", "Agent rate limit exceeded")
|
||||
return
|
||||
}
|
||||
// Locked agents are refused immediately, before any handler runs.
|
||||
// This is the second-strike state — the owner has to PRF-unlock
|
||||
// before this agent can do anything again.
|
||||
if agent.Locked {
|
||||
lib.AuditLog(db, &lib.AuditEvent{
|
||||
Action: "agent_locked_request_refused",
|
||||
Actor: lib.ActorAgent,
|
||||
Title: agent.Name,
|
||||
IPAddr: clientIP,
|
||||
})
|
||||
ErrorResponse(w, http.StatusLocked, "agent_locked",
|
||||
"Agent is locked. Owner unlock required.")
|
||||
return
|
||||
}
|
||||
|
||||
// Per-agent rate limiting (unique-entries quota) is enforced in
|
||||
// handlers that read a specific entry — GetEntry, MatchURL,
|
||||
// HandleListAlternates — via agentReadEntry(). Middleware
|
||||
// doesn't know which entry the agent is asking for here.
|
||||
|
||||
ctx := context.WithValue(r.Context(), ctxDB, db)
|
||||
ctx = context.WithValue(ctx, ctxVaultKey, l1Key)
|
||||
ctx = context.WithValue(ctx, ctxActor, lib.ActorAgent)
|
||||
|
|
@ -224,19 +236,32 @@ func (w *statusWriter) WriteHeader(code int) {
|
|||
w.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
// RateLimitMiddleware implements per-IP rate limiting.
|
||||
// RateLimitMiddleware implements per-(IP, method, path) rate limiting.
|
||||
//
|
||||
// The bucket key is the *request identity*, not just the source IP. Same
|
||||
// endpoint from the same IP shares a counter; different endpoints get
|
||||
// independent counters. This means:
|
||||
//
|
||||
// - SPA loading 8 different endpoints on first paint: 8 buckets at count 1,
|
||||
// none blocked.
|
||||
// - Brute-forcer hammering /api/auth/login/complete with different bodies:
|
||||
// one bucket, blocked at requestsPerMinute attempts. Body is intentionally
|
||||
// NOT part of the key — if it were, varying the body would bypass the
|
||||
// limiter and brute-force protection would be gone.
|
||||
// - Polling the same endpoint every few seconds: shares a bucket, counts up.
|
||||
// Blocked at requestsPerMinute, which is what we want.
|
||||
func RateLimitMiddleware(requestsPerMinute int) func(http.Handler) http.Handler {
|
||||
var mu sync.Mutex
|
||||
clients := make(map[string]*rateLimitEntry)
|
||||
buckets := make(map[string]*rateLimitEntry)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Minute)
|
||||
mu.Lock()
|
||||
now := time.Now()
|
||||
for ip, entry := range clients {
|
||||
for k, entry := range buckets {
|
||||
if now.Sub(entry.windowStart) > time.Minute {
|
||||
delete(clients, ip)
|
||||
delete(buckets, k)
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
|
|
@ -245,13 +270,25 @@ func RateLimitMiddleware(requestsPerMinute int) func(http.Handler) http.Handler
|
|||
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ip := realIP(r)
|
||||
// Owner-only bulk endpoints are exempt from the global rate limit.
|
||||
// /api/entries/batch refuses agents at the handler entry
|
||||
// (CreateEntryBatch returns 403 for any cvt_-token request), so this
|
||||
// path is owner-only by handler enforcement. The harvester defense
|
||||
// for agent-reachable paths lives in the per-agent unique-entries
|
||||
// quota (agentReadEntry), not here. Throttling the import flow
|
||||
// would only DOS the legitimate import — no defense gained.
|
||||
if r.URL.Path == "/api/entries/batch" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
key := realIP(r) + "|" + r.Method + "|" + r.URL.Path
|
||||
mu.Lock()
|
||||
entry, exists := clients[ip]
|
||||
entry, exists := buckets[key]
|
||||
now := time.Now()
|
||||
if !exists || now.Sub(entry.windowStart) > time.Minute {
|
||||
entry = &rateLimitEntry{windowStart: now, count: 0}
|
||||
clients[ip] = entry
|
||||
buckets[key] = entry
|
||||
}
|
||||
entry.count++
|
||||
count := entry.count
|
||||
|
|
@ -271,23 +308,42 @@ type rateLimitEntry struct {
|
|||
count int
|
||||
}
|
||||
|
||||
// Per-agent rate limiter (keyed by agent ID, not IP).
|
||||
// Per-agent rate limiter — tracks UNIQUE entry IDs read per minute / per hour.
|
||||
//
|
||||
// Repeated reads of the same credential do NOT count: an agent legitimately
|
||||
// re-fetching the same credential to log into the same site many times stays
|
||||
// at unique-count = 1. The limit fires only when the agent starts touching
|
||||
// many *different* credentials, which is the harvesting pattern we care about.
|
||||
//
|
||||
// Two windows run independently:
|
||||
// - RateLimit → unique entries per rolling minute
|
||||
// - RateLimitHour → unique entries per rolling hour
|
||||
//
|
||||
// A limit of 0 means unlimited for that window.
|
||||
var agentRateLimiter = newAgentLimiter()
|
||||
|
||||
type agentLimiterEntry struct {
|
||||
minuteWindowStart time.Time
|
||||
minuteEntries map[string]struct{}
|
||||
hourWindowStart time.Time
|
||||
hourEntries map[string]struct{}
|
||||
}
|
||||
|
||||
type agentLimiter struct {
|
||||
mu sync.Mutex
|
||||
agents map[string]*rateLimitEntry
|
||||
agents map[string]*agentLimiterEntry
|
||||
}
|
||||
|
||||
func newAgentLimiter() *agentLimiter {
|
||||
al := &agentLimiter{agents: make(map[string]*rateLimitEntry)}
|
||||
al := &agentLimiter{agents: make(map[string]*agentLimiterEntry)}
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Minute)
|
||||
time.Sleep(5 * time.Minute)
|
||||
al.mu.Lock()
|
||||
now := time.Now()
|
||||
for id, e := range al.agents {
|
||||
if now.Sub(e.windowStart) > time.Minute {
|
||||
// Drop agents whose hour window has expired (no recent activity).
|
||||
if now.Sub(e.hourWindowStart) > time.Hour {
|
||||
delete(al.agents, id)
|
||||
}
|
||||
}
|
||||
|
|
@ -297,17 +353,136 @@ func newAgentLimiter() *agentLimiter {
|
|||
return al
|
||||
}
|
||||
|
||||
func (al *agentLimiter) allow(agentID string, maxPerMinute int) bool {
|
||||
// LimitResult signals which window (if any) blocked an agent read.
|
||||
type LimitResult int
|
||||
|
||||
const (
|
||||
LimitAllowed LimitResult = iota
|
||||
LimitMinuteHit // Minute-window cap reached. Soft throttle, no strike.
|
||||
LimitHourHit // Hour-window cap reached. Strike — caller must persist.
|
||||
)
|
||||
|
||||
// agentReadEntry enforces the per-agent unique-entries quota when an agent
|
||||
// fetches a credential. Call immediately after the AgentCanAccess scope check.
|
||||
//
|
||||
// On hour-limit hit, applies the strike-and-lock policy:
|
||||
// - First strike (or > 2h since last strike): record the strike, throttle.
|
||||
// - Second strike within 2h: lock the agent, persist Locked=true, audit-log.
|
||||
//
|
||||
// Both persistence and audit happen in here so the call sites stay one-liners.
|
||||
//
|
||||
// Returns true if the read may proceed; false if blocked. No-op for nil agent
|
||||
// (vault owner / web UI) and for agents with both limits set to 0 (unlimited).
|
||||
func agentReadEntry(agent *lib.AgentData, entryID string, db *lib.DB, vk []byte) bool {
|
||||
if agent == nil {
|
||||
return true
|
||||
}
|
||||
if agent.Locked {
|
||||
// Belt-and-suspenders. L1Middleware blocks locked agents at the top
|
||||
// before any handler runs, but if that check is ever bypassed this
|
||||
// catches it at the per-entry layer.
|
||||
return false
|
||||
}
|
||||
if agent.RateLimit == 0 && agent.RateLimitHour == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
result := agentRateLimiter.checkEntry(agent.AgentID, entryID, agent.RateLimit, agent.RateLimitHour)
|
||||
switch result {
|
||||
case LimitAllowed:
|
||||
return true
|
||||
case LimitMinuteHit:
|
||||
return false // soft throttle, no strike
|
||||
case LimitHourHit:
|
||||
// Strike-and-lock policy.
|
||||
now := time.Now().Unix()
|
||||
const strikeWindowSeconds = 2 * 60 * 60 // 2 hours
|
||||
secondStrike := agent.LastStrikeAt > 0 && (now-agent.LastStrikeAt) < strikeWindowSeconds
|
||||
|
||||
if secondStrike {
|
||||
// Lock the agent. Persist + audit + update in-memory state.
|
||||
if err := lib.AgentLockWithStrike(db, vk, agent.EntryID, now); err != nil {
|
||||
log.Printf("agent %s: failed to persist lock: %v", agent.Name, err)
|
||||
}
|
||||
lib.AuditLog(db, &lib.AuditEvent{
|
||||
Action: "agent_locked", Actor: lib.ActorAgent,
|
||||
Title: agent.Name,
|
||||
})
|
||||
log.Printf("agent %s: LOCKED after second hour-limit strike (last strike %ds ago)",
|
||||
agent.Name, now-agent.LastStrikeAt)
|
||||
agent.Locked = true
|
||||
agent.LastStrikeAt = now
|
||||
} else {
|
||||
// First strike. Record timestamp; don't lock.
|
||||
if err := lib.AgentRecordStrike(db, vk, agent.EntryID, now); err != nil {
|
||||
log.Printf("agent %s: failed to persist strike: %v", agent.Name, err)
|
||||
}
|
||||
lib.AuditLog(db, &lib.AuditEvent{
|
||||
Action: "agent_strike", Actor: lib.ActorAgent,
|
||||
Title: agent.Name,
|
||||
})
|
||||
log.Printf("agent %s: hour-limit strike recorded", agent.Name)
|
||||
agent.LastStrikeAt = now
|
||||
}
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkEntry records the agent's intent to read entryID and returns:
|
||||
// - LimitAllowed — entry recorded, read allowed
|
||||
// - LimitMinuteHit — minute-window cap exceeded; entry NOT added to either set
|
||||
// - LimitHourHit — hour-window cap exceeded; entry NOT added to either set
|
||||
//
|
||||
// Repeated calls with the same entryID inside an active window are free:
|
||||
// the entry is already in the set, len(set) does not grow, the call returns
|
||||
// LimitAllowed.
|
||||
func (al *agentLimiter) checkEntry(agentID, entryID string, maxPerMinute, maxPerHour int) LimitResult {
|
||||
al.mu.Lock()
|
||||
defer al.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
e, exists := al.agents[agentID]
|
||||
if !exists || now.Sub(e.windowStart) > time.Minute {
|
||||
e = &rateLimitEntry{windowStart: now, count: 0}
|
||||
if !exists {
|
||||
e = &agentLimiterEntry{
|
||||
minuteWindowStart: now,
|
||||
minuteEntries: make(map[string]struct{}),
|
||||
hourWindowStart: now,
|
||||
hourEntries: make(map[string]struct{}),
|
||||
}
|
||||
al.agents[agentID] = e
|
||||
}
|
||||
e.count++
|
||||
return e.count <= maxPerMinute
|
||||
|
||||
// Roll the windows if they have expired.
|
||||
if now.Sub(e.minuteWindowStart) > time.Minute {
|
||||
e.minuteWindowStart = now
|
||||
e.minuteEntries = make(map[string]struct{})
|
||||
}
|
||||
if now.Sub(e.hourWindowStart) > time.Hour {
|
||||
e.hourWindowStart = now
|
||||
e.hourEntries = make(map[string]struct{})
|
||||
}
|
||||
|
||||
_, alreadyMinute := e.minuteEntries[entryID]
|
||||
_, alreadyHour := e.hourEntries[entryID]
|
||||
|
||||
// Hour limit takes precedence — it's the strike trigger.
|
||||
if maxPerHour > 0 && !alreadyHour && len(e.hourEntries) >= maxPerHour {
|
||||
return LimitHourHit
|
||||
}
|
||||
if maxPerMinute > 0 && !alreadyMinute && len(e.minuteEntries) >= maxPerMinute {
|
||||
return LimitMinuteHit
|
||||
}
|
||||
|
||||
e.minuteEntries[entryID] = struct{}{}
|
||||
e.hourEntries[entryID] = struct{}{}
|
||||
return LimitAllowed
|
||||
}
|
||||
|
||||
// allowEntry is a thin compatibility wrapper around checkEntry for callers
|
||||
// that only need a yes/no answer (currently the unit tests).
|
||||
func (al *agentLimiter) allowEntry(agentID, entryID string, maxPerMinute, maxPerHour int) bool {
|
||||
return al.checkEntry(agentID, entryID, maxPerMinute, maxPerHour) == LimitAllowed
|
||||
}
|
||||
|
||||
// CORSMiddleware handles CORS headers.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,69 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAgentLimiter_RepeatedSameEntryDoesNotCount(t *testing.T) {
|
||||
al := newAgentLimiter()
|
||||
// Limit: 2 unique per minute. Same entry fetched 10 times = 1 unique.
|
||||
for i := 0; i < 10; i++ {
|
||||
if !al.allowEntry("agent-A", "entry-1", 2, 0) {
|
||||
t.Fatalf("repeated read of entry-1 should be allowed (iter %d)", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentLimiter_DistinctEntriesCountTowardLimit(t *testing.T) {
|
||||
al := newAgentLimiter()
|
||||
if !al.allowEntry("agent-A", "entry-1", 2, 0) {
|
||||
t.Fatal("entry-1 should be allowed")
|
||||
}
|
||||
if !al.allowEntry("agent-A", "entry-2", 2, 0) {
|
||||
t.Fatal("entry-2 should be allowed")
|
||||
}
|
||||
if al.allowEntry("agent-A", "entry-3", 2, 0) {
|
||||
t.Error("entry-3 should be blocked (3 unique > limit of 2)")
|
||||
}
|
||||
// Re-fetching one of the existing entries should still work — it's not new.
|
||||
if !al.allowEntry("agent-A", "entry-1", 2, 0) {
|
||||
t.Error("re-fetching entry-1 should still be allowed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentLimiter_PerAgentIsolation(t *testing.T) {
|
||||
al := newAgentLimiter()
|
||||
// Saturate agent A.
|
||||
al.allowEntry("agent-A", "e1", 1, 0)
|
||||
if al.allowEntry("agent-A", "e2", 1, 0) {
|
||||
t.Error("agent-A second unique entry should be blocked")
|
||||
}
|
||||
// Agent B is unaffected.
|
||||
if !al.allowEntry("agent-B", "e1", 1, 0) {
|
||||
t.Error("agent-B should be unaffected by agent-A's quota")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentLimiter_HourLimitIndependent(t *testing.T) {
|
||||
al := newAgentLimiter()
|
||||
// Minute unlimited, hour = 2.
|
||||
if !al.allowEntry("agent-A", "e1", 0, 2) {
|
||||
t.Fatal("e1 within hour limit should be allowed")
|
||||
}
|
||||
if !al.allowEntry("agent-A", "e2", 0, 2) {
|
||||
t.Fatal("e2 within hour limit should be allowed")
|
||||
}
|
||||
if al.allowEntry("agent-A", "e3", 0, 2) {
|
||||
t.Error("e3 should be blocked by hour limit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentLimiter_ZeroLimitsMeanUnlimited(t *testing.T) {
|
||||
al := newAgentLimiter()
|
||||
for i := 0; i < 100; i++ {
|
||||
entryID := "entry-" + string(rune('a'+i%26))
|
||||
if !al.allowEntry("agent-A", entryID, 0, 0) {
|
||||
t.Fatalf("zero limits = unlimited; iter %d unexpectedly blocked", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -23,7 +23,7 @@ func NewRouter(cfg *lib.Config, webFS embed.FS) *chi.Mux {
|
|||
// Security: Limit request body to 64KB. Rejects binary uploads (images, executables).
|
||||
// Markdown notes and text data only. Returns 413 if exceeded, 415 for binary.
|
||||
r.Use(MaxBodySizeMiddleware(65536))
|
||||
r.Use(RateLimitMiddleware(600)) // 600 req/min per IP (development)
|
||||
r.Use(RateLimitMiddleware(5)) // 5 req/min per IP
|
||||
r.Use(L1Middleware(cfg.DataDir))
|
||||
|
||||
// Health & Version (unauthenticated)
|
||||
|
|
@ -145,6 +145,7 @@ func mountAPIRoutes(r chi.Router, h *Handlers) {
|
|||
r.Post("/agents", h.HandleCreateAgent)
|
||||
r.Get("/agents", h.HandleListAgents)
|
||||
r.Put("/agents/{id}", h.HandleUpdateAgent)
|
||||
r.Post("/agents/{id}/unlock", h.HandleUnlockAgent)
|
||||
r.Delete("/agents/{id}", h.HandleDeleteAgent)
|
||||
|
||||
// WebAuthn credential management
|
||||
|
|
|
|||
|
|
@ -44,6 +44,15 @@ func main() {
|
|||
}
|
||||
cfg.Port = strconv.Itoa(*port)
|
||||
|
||||
// Ensure the vault and WL3 directories exist (hardcoded relative paths
|
||||
// from Config — no env vars, symlink if you want them elsewhere).
|
||||
if err := os.MkdirAll(cfg.DataDir, 0o700); err != nil {
|
||||
log.Fatalf("create vault dir %s: %v", cfg.DataDir, err)
|
||||
}
|
||||
if err := os.MkdirAll(cfg.WL3Dir, 0o755); err != nil {
|
||||
log.Fatalf("create WL3 dir %s: %v", cfg.WL3Dir, err)
|
||||
}
|
||||
|
||||
// Initialize edition-specific configuration
|
||||
log.Printf("Starting Clavitor Vault %s - %s Edition", version, edition.Current.Name())
|
||||
|
||||
|
|
|
|||
|
|
@ -28,6 +28,60 @@
|
|||
return new Uint8Array(bits);
|
||||
}
|
||||
|
||||
// Derive the 8-byte P1 lookup token from the 32-byte master key.
|
||||
// P1 is the public WL3 lookup index. The server uses it to find the credential
|
||||
// file but cannot decrypt anything from it. P1 ⊥ L1 (different HKDF info string).
|
||||
async function deriveP1(masterBytes) {
|
||||
const keyMaterial = await crypto.subtle.importKey(
|
||||
'raw', masterBytes, { name: 'HKDF' }, false, ['deriveBits']
|
||||
);
|
||||
const bits = await crypto.subtle.deriveBits(
|
||||
{ name: 'HKDF', hash: 'SHA-256', salt: new Uint8Array(0), info: new TextEncoder().encode('clavitor-p1-v1') },
|
||||
keyMaterial, 64
|
||||
);
|
||||
return new Uint8Array(bits);
|
||||
}
|
||||
|
||||
// Hex-encode bytes (lowercase, no separator).
|
||||
function hexEncode(bytes) {
|
||||
return Array.from(bytes, b => b.toString(16).padStart(2, '0')).join('');
|
||||
}
|
||||
|
||||
// Wrap L3 (32 bytes) with the raw PRF output (32 bytes) using AES-256-GCM.
|
||||
// Output format: nonce(12) || ciphertext || tag — Web Crypto puts the tag at the end.
|
||||
// Server stores this opaquely; only a future device tap can unwrap it.
|
||||
async function wrapL3(l3Bytes, prfBytes) {
|
||||
const key = await crypto.subtle.importKey(
|
||||
'raw', prfBytes, { name: 'AES-GCM' }, false, ['encrypt']
|
||||
);
|
||||
const nonce = crypto.getRandomValues(new Uint8Array(12));
|
||||
const ct = await crypto.subtle.encrypt(
|
||||
{ name: 'AES-GCM', iv: nonce },
|
||||
key, l3Bytes
|
||||
);
|
||||
const out = new Uint8Array(nonce.length + ct.byteLength);
|
||||
out.set(nonce, 0);
|
||||
out.set(new Uint8Array(ct), nonce.length);
|
||||
return out;
|
||||
}
|
||||
|
||||
// Unwrap an L3 blob produced by wrapL3 using the same PRF output.
|
||||
async function unwrapL3(wrappedBytes, prfBytes) {
|
||||
if (wrappedBytes.length < 12 + 16) {
|
||||
throw new Error('wrapped L3 too short');
|
||||
}
|
||||
const nonce = wrappedBytes.slice(0, 12);
|
||||
const ct = wrappedBytes.slice(12);
|
||||
const key = await crypto.subtle.importKey(
|
||||
'raw', prfBytes, { name: 'AES-GCM' }, false, ['decrypt']
|
||||
);
|
||||
const pt = await crypto.subtle.decrypt(
|
||||
{ name: 'AES-GCM', iv: nonce },
|
||||
key, ct
|
||||
);
|
||||
return new Uint8Array(pt);
|
||||
}
|
||||
|
||||
// Store master key in sessionStorage (base64)
|
||||
function storeMaster(masterBytes) {
|
||||
const b64 = btoa(String.fromCharCode.apply(null, masterBytes));
|
||||
|
|
@ -52,13 +106,15 @@
|
|||
return !!getMaster();
|
||||
}
|
||||
|
||||
// Store vault hint persistently (for returning users)
|
||||
function storeVaultHint(l0, credentialIds) {
|
||||
// Store vault hint persistently (for returning users).
|
||||
// p1 is the preferred lookup token; l0 is kept as a fallback for old hints.
|
||||
function storeVaultHint(l0, credentialIds, p1) {
|
||||
const hint = {
|
||||
l0: b64Encode(l0),
|
||||
credentialIds: credentialIds.map(id => b64Encode(id)),
|
||||
storedAt: Date.now()
|
||||
};
|
||||
if (p1) hint.p1 = hexEncode(p1);
|
||||
localStorage.setItem(VAULT_HINT_KEY, JSON.stringify(hint));
|
||||
}
|
||||
|
||||
|
|
@ -70,7 +126,8 @@
|
|||
const hint = JSON.parse(json);
|
||||
return {
|
||||
l0: b64Decode(hint.l0),
|
||||
credentialIds: hint.credentialIds.map(id => b64Decode(id))
|
||||
credentialIds: hint.credentialIds.map(id => b64Decode(id)),
|
||||
p1: hint.p1 || null // hex string, or null for legacy hints
|
||||
};
|
||||
} catch (e) {
|
||||
return null;
|
||||
|
|
@ -169,13 +226,35 @@
|
|||
return await completeRegistration(credential, prfOutput, deviceName, deviceType, options);
|
||||
}
|
||||
|
||||
// Complete registration after we have PRF
|
||||
// Complete registration after we have PRF.
|
||||
//
|
||||
// The browser is the only place that ever holds L2/L3 or the full master
|
||||
// key. The server gets only the minimum it needs:
|
||||
// - L1 (8 bytes) → server slices L0 from L1[:4] for vault file naming;
|
||||
// L1 itself is later used as the bearer for entry
|
||||
// encryption keys.
|
||||
// - P1 (8 bytes) → public WL3 lookup token, server stores as filename.
|
||||
// - wrapped_l3 → AES-GCM(L3, raw PRF). Opaque blob; server cannot
|
||||
// decrypt. Used by future device taps to recover L3.
|
||||
//
|
||||
// master_key is NOT sent. L2 and L3 plaintext NEVER leave this function.
|
||||
async function completeRegistration(credential, prfOutput, deviceName, deviceType, options) {
|
||||
// Derive master key
|
||||
const masterKey = await deriveMaster(new Uint8Array(prfOutput));
|
||||
const prfBytes = new Uint8Array(prfOutput);
|
||||
|
||||
// Derive master key (32 bytes); this is the in-browser working key.
|
||||
const masterKey = await deriveMaster(prfBytes);
|
||||
storeMaster(masterKey);
|
||||
|
||||
// Tell server to create vault (sends master key so server knows L0)
|
||||
// Slice the tiers we need for the wire request.
|
||||
const l1 = masterKey.slice(0, 8); // server-allowed
|
||||
const l3 = masterKey.slice(0, 32); // server-VETOED — wrap before sending
|
||||
|
||||
// Derive the public lookup token.
|
||||
const p1 = await deriveP1(masterKey);
|
||||
|
||||
// Wrap L3 with the raw PRF output. The server stores this opaquely.
|
||||
const wrappedL3 = await wrapL3(l3, prfBytes);
|
||||
|
||||
const completeRes = await fetch('/api/auth/register/complete', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
|
|
@ -185,7 +264,9 @@
|
|||
public_key: b64Encode(new Uint8Array(credential.response.getPublicKey())),
|
||||
name: deviceName,
|
||||
authenticator_attachment: deviceType,
|
||||
master_key: b64Encode(masterKey)
|
||||
l1: b64Encode(l1),
|
||||
p1: b64Encode(p1),
|
||||
wrapped_l3: b64Encode(wrappedL3)
|
||||
})
|
||||
});
|
||||
|
||||
|
|
@ -219,20 +300,23 @@
|
|||
// ---------------------------------------------------------------------------
|
||||
|
||||
async function unlock() {
|
||||
// Get L1 from master key if available (from previous session)
|
||||
// If we already have a master key from this session, derive both P1
|
||||
// (preferred WL3 lookup) and L1 (legacy fallback) from it.
|
||||
let p1Hex = null;
|
||||
let l1Base64 = null;
|
||||
const masterKeyB64 = sessionStorage.getItem('clavitor_master');
|
||||
if (masterKeyB64) {
|
||||
const masterBytes = Uint8Array.from(atob(masterKeyB64), c => c.charCodeAt(0));
|
||||
const l1 = masterBytes.slice(0, 8); // L1 = first 8 bytes
|
||||
l1Base64 = b64Encode(l1);
|
||||
const p1Bytes = await deriveP1(masterBytes);
|
||||
p1Hex = hexEncode(p1Bytes);
|
||||
l1Base64 = b64Encode(masterBytes.slice(0, 8));
|
||||
}
|
||||
|
||||
// 1. Call login/begin with L1 (server derives L0 and opens vault)
|
||||
// 1. Call login/begin. Server tries P1 first, falls back to L1/L0.
|
||||
const beginRes = await fetch('/api/auth/login/begin', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ l1: l1Base64 }) // null if no previous session
|
||||
body: JSON.stringify({ p1: p1Hex, l1: l1Base64 })
|
||||
});
|
||||
|
||||
if (!beginRes.ok) {
|
||||
|
|
@ -288,10 +372,12 @@
|
|||
throw new Error('Authentication failed');
|
||||
}
|
||||
|
||||
// Store vault hint for future sessions (persistent)
|
||||
// Store vault hint for future sessions (persistent).
|
||||
// Includes P1 so subsequent unlockWithHint() uses the WL3 lookup path.
|
||||
const l0 = masterKey.slice(0, 4);
|
||||
const p1Bytes = await deriveP1(masterKey);
|
||||
const credIds = options.publicKey.allowCredentials.map(c => b64Decode(c.id));
|
||||
storeVaultHint(l0, credIds);
|
||||
storeVaultHint(l0, credIds, p1Bytes);
|
||||
|
||||
return { status: 'unlocked' };
|
||||
}
|
||||
|
|
@ -306,14 +392,12 @@
|
|||
throw new Error('No vault hint stored - registration required');
|
||||
}
|
||||
|
||||
// Get L0 from hint and send to server to open vault
|
||||
const l1Base64 = b64Encode(new Uint8Array(8)); // dummy L1, server will use stored creds
|
||||
|
||||
// 1. Call login/begin with hint's L0
|
||||
// Prefer P1 lookup if the hint has it; fall back to L0 for legacy hints.
|
||||
const beginRes = await fetch('/api/auth/login/begin', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
p1: hint.p1, // null for legacy hints, server falls through
|
||||
l0: b64Encode(hint.l0),
|
||||
credential_ids: hint.credentialIds.map(id => b64Encode(id))
|
||||
})
|
||||
|
|
|
|||
|
|
@ -1044,7 +1044,10 @@ async function doImport() {
|
|||
ui.importBtn.textContent = 'Importing...';
|
||||
|
||||
var total = selected.length, imported = 0, updated = 0, skipped = 0, failed = 0;
|
||||
var batchSize = 50;
|
||||
// 250 entries × ~200 bytes/entry ≈ 50KB per request, comfortably under
|
||||
// MaxBodySizeMiddleware(65536). Batch endpoint is exempt from the global
|
||||
// rate limit (api/middleware.go), so chunking is purely a body-size concern.
|
||||
var batchSize = 250;
|
||||
for (var i = 0; i < total; i += batchSize) {
|
||||
var chunk = selected.slice(i, i + batchSize).map(function(r) {
|
||||
return {
|
||||
|
|
|
|||
|
|
@ -1,26 +1,25 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Config holds application configuration.
|
||||
//
|
||||
// Paths are hardcoded relative to the executable's working directory:
|
||||
// - vault SQLite files live in ./vaults/
|
||||
// - WL3 credential JSONs live in ./WL3/
|
||||
//
|
||||
// No env vars, no flags. People who want different locations use a symlink.
|
||||
type Config struct {
|
||||
Port string // default "1984"
|
||||
DataDir string // directory for vault DB files
|
||||
DataDir string // ./vaults — vault SQLite files
|
||||
WL3Dir string // ./WL3 — WL3 credential JSONs
|
||||
SessionTTL int64 // default 86400 (24 hours)
|
||||
}
|
||||
|
||||
// LoadConfig loads configuration from environment variables.
|
||||
// LoadConfig returns the default configuration.
|
||||
func LoadConfig() (*Config, error) {
|
||||
dataDir := os.Getenv("DATA_DIR")
|
||||
if dataDir == "" {
|
||||
dataDir = "."
|
||||
}
|
||||
|
||||
return &Config{
|
||||
Port: "1984",
|
||||
DataDir: dataDir,
|
||||
DataDir: "vaults",
|
||||
WL3Dir: "WL3",
|
||||
SessionTTL: 86400,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,10 +14,14 @@ import (
|
|||
"golang.org/x/crypto/hkdf"
|
||||
)
|
||||
|
||||
// CVT record types
|
||||
// CVT record types.
|
||||
//
|
||||
// Only the wire token (0x00) is handled by the vault server. The client
|
||||
// credential type (0x01, L2-bearing) is implemented in the C CLI
|
||||
// (clavis-cli/src/cvt.c) and never decrypted by Go code — L2 is a hard veto
|
||||
// for the server.
|
||||
const (
|
||||
CVTWireToken byte = 0x00 // Sent to vault: L1(8) + agent_id(16)
|
||||
CVTClientCredential byte = 0x01 // Stored on CLI: L2(16) + agent_id(16) + POP(4), encrypted with L0
|
||||
CVTWireToken byte = 0x00 // Sent to vault: L1(8) + agent_id(16)
|
||||
)
|
||||
|
||||
const cvtPrefix = "cvt_"
|
||||
|
|
@ -32,24 +36,6 @@ var (
|
|||
// Minting
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// MintCredential creates a type 0x01 client credential token.
|
||||
// Payload: L2(16) + agent_id(16) + POP(4, null-padded), encrypted with L0 (4 bytes).
|
||||
// POP is optional (empty string = 4 null bytes). CLI extracts POP after decrypt.
|
||||
func MintCredential(l0, l2, agentID []byte, pop string) (string, error) {
|
||||
if len(l0) != 4 || len(l2) != 16 || len(agentID) != 16 {
|
||||
return "", fmt.Errorf("bad lengths: l0=%d l2=%d agent_id=%d", len(l0), len(l2), len(agentID))
|
||||
}
|
||||
// POP is 4 bytes, null-padded if shorter
|
||||
popBytes := make([]byte, 4)
|
||||
copy(popBytes, pop)
|
||||
|
||||
payload := make([]byte, 36)
|
||||
copy(payload[0:16], l2)
|
||||
copy(payload[16:32], agentID)
|
||||
copy(payload[32:36], popBytes)
|
||||
return cvtEncode(CVTClientCredential, l0, payload)
|
||||
}
|
||||
|
||||
// MintWireToken creates a type 0x00 wire token.
|
||||
// Embeds L1 (8 bytes) + agent_id (16 bytes), encrypted with L0 (4 bytes).
|
||||
func MintWireToken(l0, l1, agentID []byte) (string, error) {
|
||||
|
|
@ -66,23 +52,6 @@ func MintWireToken(l0, l1, agentID []byte) (string, error) {
|
|||
// Parsing
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// ParseCredential decrypts a type 0x01 client credential.
|
||||
// Returns L0 (4 bytes), L2 (16 bytes), agent_id (16 bytes), and POP (4 bytes, trim nulls).
|
||||
// POP is null-padded if shorter than 4 chars; use strings.TrimRight(string(pop), "\x00") to get clean string.
|
||||
func ParseCredential(token string) (l0, l2, agentID, pop []byte, err error) {
|
||||
typ, l0, payload, err := cvtDecode(token)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
if typ != CVTClientCredential {
|
||||
return nil, nil, nil, nil, ErrCVTBadType
|
||||
}
|
||||
if len(payload) != 36 {
|
||||
return nil, nil, nil, nil, fmt.Errorf("credential payload: got %d bytes, want 36", len(payload))
|
||||
}
|
||||
return l0, payload[0:16], payload[16:32], payload[32:36], nil
|
||||
}
|
||||
|
||||
// ParseWireToken decrypts a type 0x00 wire token.
|
||||
// Returns L0 (4 bytes), L1 (8 bytes), and agent_id (16 bytes).
|
||||
func ParseWireToken(token string) (l0, l1, agentID []byte, err error) {
|
||||
|
|
@ -99,22 +68,6 @@ func ParseWireToken(token string) (l0, l1, agentID []byte, err error) {
|
|||
return l0, payload[0:8], payload[8:24], nil
|
||||
}
|
||||
|
||||
// CredentialToWire converts a stored client credential to a wire token.
|
||||
// Decrypts 0x01, extracts L2 → L1 = L2[:8], keeps agent_id, builds 0x00.
|
||||
// Returns the wire token string, L2 (for client-side decryption), and POP (for endpoint selection).
|
||||
func CredentialToWire(credential string) (wire string, l2, pop []byte, err error) {
|
||||
l0, l2, agentID, pop, err := ParseCredential(credential)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
}
|
||||
l1 := l2[:8]
|
||||
wire, err = MintWireToken(l0, l1, agentID)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
}
|
||||
return wire, l2, pop, nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CVT envelope: type(1) + L0(4) + AES-GCM(derived(L0), payload)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -2,88 +2,9 @@ package lib
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMintCredential_roundtrip(t *testing.T) {
|
||||
l0 := []byte{0xAA, 0xBB, 0xCC, 0xDD}
|
||||
l2 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}
|
||||
agentID := make([]byte, 16)
|
||||
for i := range agentID {
|
||||
agentID[i] = byte(0x20 + i)
|
||||
}
|
||||
pop := "use1"
|
||||
|
||||
token, err := MintCredential(l0, l2, agentID, pop)
|
||||
if err != nil {
|
||||
t.Fatalf("MintCredential: %v", err)
|
||||
}
|
||||
if token[:4] != "cvt_" {
|
||||
t.Fatalf("missing cvt_ prefix: %s", token[:10])
|
||||
}
|
||||
|
||||
gotL0, gotL2, gotAgentID, gotPOP, err := ParseCredential(token)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseCredential: %v", err)
|
||||
}
|
||||
if !bytes.Equal(gotL0, l0) {
|
||||
t.Fatalf("L0 mismatch: %x != %x", gotL0, l0)
|
||||
}
|
||||
if !bytes.Equal(gotL2, l2) {
|
||||
t.Fatalf("L2 mismatch: %x != %x", gotL2, l2)
|
||||
}
|
||||
if !bytes.Equal(gotAgentID, agentID) {
|
||||
t.Fatalf("agent_id mismatch: %x != %x", gotAgentID, agentID)
|
||||
}
|
||||
gotPOPStr := strings.TrimRight(string(gotPOP), "\x00")
|
||||
if gotPOPStr != pop {
|
||||
t.Fatalf("POP mismatch: %q != %q", gotPOPStr, pop)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMintCredential_noPOP(t *testing.T) {
|
||||
l0 := []byte{0xAA, 0xBB, 0xCC, 0xDD}
|
||||
l2 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}
|
||||
agentID := make([]byte, 16)
|
||||
|
||||
// Empty POP (Community/self-hosted)
|
||||
token, err := MintCredential(l0, l2, agentID, "")
|
||||
if err != nil {
|
||||
t.Fatalf("MintCredential: %v", err)
|
||||
}
|
||||
|
||||
_, _, _, gotPOP, err := ParseCredential(token)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseCredential: %v", err)
|
||||
}
|
||||
// Should be 4 null bytes
|
||||
if !bytes.Equal(gotPOP, []byte{0, 0, 0, 0}) {
|
||||
t.Fatalf("POP should be null bytes for empty pop: %x", gotPOP)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMintCredential_shortPOP(t *testing.T) {
|
||||
l0 := []byte{0xAA, 0xBB, 0xCC, 0xDD}
|
||||
l2 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}
|
||||
agentID := make([]byte, 16)
|
||||
|
||||
// 3-char POP (ca1) - should be stored as "ca1\x00"
|
||||
token, err := MintCredential(l0, l2, agentID, "ca1")
|
||||
if err != nil {
|
||||
t.Fatalf("MintCredential: %v", err)
|
||||
}
|
||||
|
||||
_, _, _, gotPOP, err := ParseCredential(token)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseCredential: %v", err)
|
||||
}
|
||||
gotPOPStr := strings.TrimRight(string(gotPOP), "\x00")
|
||||
if gotPOPStr != "ca1" {
|
||||
t.Fatalf("POP mismatch: %q != %q", gotPOPStr, "ca1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMintWireToken_roundtrip(t *testing.T) {
|
||||
l0 := []byte{0x11, 0x22, 0x33, 0x44}
|
||||
l1 := []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
|
||||
|
|
@ -112,44 +33,6 @@ func TestMintWireToken_roundtrip(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCredentialToWire(t *testing.T) {
|
||||
l0 := []byte{0xAA, 0xBB, 0xCC, 0xDD}
|
||||
l2 := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}
|
||||
agentID := make([]byte, 16)
|
||||
for i := range agentID {
|
||||
agentID[i] = byte(0x60 + i)
|
||||
}
|
||||
|
||||
cred, err := MintCredential(l0, l2, agentID, "use1")
|
||||
if err != nil {
|
||||
t.Fatalf("MintCredential: %v", err)
|
||||
}
|
||||
|
||||
wire, gotL2, gotPOP, err := CredentialToWire(cred)
|
||||
if err != nil {
|
||||
t.Fatalf("CredentialToWire: %v", err)
|
||||
}
|
||||
if !bytes.Equal(gotL2, l2) {
|
||||
t.Fatalf("L2 mismatch")
|
||||
}
|
||||
gotPOPStr := strings.TrimRight(string(gotPOP), "\x00")
|
||||
if gotPOPStr != "use1" {
|
||||
t.Fatalf("POP mismatch: %q != %q", gotPOPStr, "use1")
|
||||
}
|
||||
|
||||
// Parse the wire token — should have L1 = L2[:8]
|
||||
_, gotL1, gotAgentID, err := ParseWireToken(wire)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseWireToken: %v", err)
|
||||
}
|
||||
if !bytes.Equal(gotL1, l2[:8]) {
|
||||
t.Fatalf("L1 should be L2[:8]: got %x, want %x", gotL1, l2[:8])
|
||||
}
|
||||
if !bytes.Equal(gotAgentID, agentID) {
|
||||
t.Fatalf("agent_id mismatch after credential→wire")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCVT_tamper_detection(t *testing.T) {
|
||||
l0 := []byte{0x11, 0x22, 0x33, 0x44}
|
||||
l1 := []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
|
||||
|
|
@ -165,20 +48,6 @@ func TestCVT_tamper_detection(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCVT_wrong_type(t *testing.T) {
|
||||
l0 := []byte{0x11, 0x22, 0x33, 0x44}
|
||||
l2 := make([]byte, 16)
|
||||
agentID := make([]byte, 16)
|
||||
|
||||
cred, _ := MintCredential(l0, l2, agentID, "")
|
||||
|
||||
// Try to parse credential as wire token — should fail
|
||||
_, _, _, err := ParseWireToken(cred)
|
||||
if err != ErrCVTBadType {
|
||||
t.Fatalf("expected ErrCVTBadType, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCVT_unique(t *testing.T) {
|
||||
l0 := []byte{0x11, 0x22, 0x33, 0x44}
|
||||
l1 := []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}
|
||||
|
|
|
|||
|
|
@ -73,6 +73,30 @@ CREATE TABLE IF NOT EXISTS webauthn_challenges (
|
|||
type TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL
|
||||
);
|
||||
|
||||
-- Slice 2 (commercial enrollment gating).
|
||||
-- These tables exist in all builds but are only written to by commercial-only
|
||||
-- handlers. In community vaults they stay empty — community has no central
|
||||
-- admin issuing tokens.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS pending_enrollments (
|
||||
token TEXT PRIMARY KEY, -- 6 uppercase chars (e.g. AB7QXP)
|
||||
customer_id TEXT NOT NULL, -- central admin's customer id, opaque to POP
|
||||
plan TEXT NOT NULL, -- plan name for display/audit
|
||||
name TEXT, -- 'Anna' — display name set at issue time
|
||||
expires_at INTEGER NOT NULL, -- unix seconds, typically now+24h
|
||||
consumed_at INTEGER, -- NULL until claimed
|
||||
consumed_by_l0 TEXT, -- L0 (hex) of the vault that claimed the token
|
||||
created_at INTEGER NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_pending_enrollments_expires ON pending_enrollments(expires_at);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS enrollment_attempts (
|
||||
ip TEXT PRIMARY KEY,
|
||||
window_start INTEGER NOT NULL, -- start of the 10-minute window (unix s)
|
||||
fail_count INTEGER NOT NULL DEFAULT 0,
|
||||
blocked_until INTEGER NOT NULL DEFAULT 0 -- unix s; 0 = not blocked
|
||||
);
|
||||
`
|
||||
|
||||
// OpenDB opens the SQLite database.
|
||||
|
|
@ -87,22 +111,12 @@ func OpenDB(dbPath string) (*DB, error) {
|
|||
return &DB{Conn: conn, DBPath: dbPath}, nil
|
||||
}
|
||||
|
||||
// MigrateDB runs the schema and handles migrations.
|
||||
func MigrateDB(db *DB) error {
|
||||
// InitSchema creates tables for a new vault. No migrations — early stage.
|
||||
// Schema changes = delete vault and re-register. See CLAVITOR-PRINCIPLES.md
|
||||
func InitSchema(db *DB) error {
|
||||
if _, err := db.Conn.Exec(schema); err != nil {
|
||||
return err
|
||||
}
|
||||
// Migration: add columns if not present
|
||||
columns := []string{
|
||||
`ALTER TABLE entries ADD COLUMN alternate_for INTEGER`,
|
||||
`ALTER TABLE entries ADD COLUMN verified_at INTEGER`,
|
||||
`ALTER TABLE webauthn_credentials ADD COLUMN authenticator_attachment TEXT NOT NULL DEFAULT ''`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_entries_alternate ON entries(alternate_for) WHERE alternate_for IS NOT NULL`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_entries_verified ON entries(verified_at) WHERE verified_at IS NOT NULL`,
|
||||
}
|
||||
for _, col := range columns {
|
||||
db.Conn.Exec(col) // ignore errors (column may already exist)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -583,135 +597,109 @@ func AgentLookup(db *DB, vaultKey []byte, agentIDHex string) (*AgentData, error)
|
|||
RateLimit: vd.RateLimit,
|
||||
RateLimitHour: vd.RateLimitHour,
|
||||
Locked: vd.Locked,
|
||||
LastStrikeAt: vd.LastStrikeAt,
|
||||
EntryID: e.EntryID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// agentMutate runs fn against an agent entry's decrypted VaultData,
|
||||
// then re-encrypts and persists. The shared core for every agent-record
|
||||
// state change so each individual mutation is one line.
|
||||
func agentMutate(db *DB, vaultKey []byte, entryID HexID, fn func(*VaultData)) error {
|
||||
var e Entry
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT entry_id, data, data_level FROM entries WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
int64(entryID),
|
||||
).Scan(&e.EntryID, &e.Data, &e.DataLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataText, err := Unpack(entryKey, e.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var vd VaultData
|
||||
if err := json.Unmarshal([]byte(dataText), &vd); err != nil {
|
||||
return err
|
||||
}
|
||||
fn(&vd)
|
||||
updated, err := json.Marshal(vd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packed, err := Pack(entryKey, string(updated))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = db.Conn.Exec(`UPDATE entries SET data = ?, updated_at = ? WHERE entry_id = ?`,
|
||||
packed, time.Now().Unix(), int64(entryID))
|
||||
return err
|
||||
}
|
||||
|
||||
// AgentUpdateAllowedIPs re-encrypts the agent entry data with updated AllowedIPs.
|
||||
func AgentUpdateAllowedIPs(db *DB, vaultKey []byte, agent *AgentData) error {
|
||||
var e Entry
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT entry_id, data, data_level FROM entries WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
int64(agent.EntryID),
|
||||
).Scan(&e.EntryID, &e.Data, &e.DataLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataText, err := Unpack(entryKey, e.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var vd VaultData
|
||||
if err := json.Unmarshal([]byte(dataText), &vd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vd.AllowedIPs = agent.AllowedIPs
|
||||
updated, err := json.Marshal(vd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packed, err := Pack(entryKey, string(updated))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Conn.Exec(`UPDATE entries SET data = ?, updated_at = ? WHERE entry_id = ?`,
|
||||
packed, time.Now().Unix(), int64(agent.EntryID))
|
||||
return err
|
||||
return agentMutate(db, vaultKey, agent.EntryID, func(vd *VaultData) {
|
||||
vd.AllowedIPs = agent.AllowedIPs
|
||||
})
|
||||
}
|
||||
|
||||
// AgentLock sets the locked flag on an agent (rate limit hit).
|
||||
// AgentRecordStrike updates an agent's LastStrikeAt timestamp.
|
||||
// First strike — agent is throttled but not locked.
|
||||
func AgentRecordStrike(db *DB, vaultKey []byte, entryID HexID, ts int64) error {
|
||||
return agentMutate(db, vaultKey, entryID, func(vd *VaultData) {
|
||||
vd.LastStrikeAt = ts
|
||||
})
|
||||
}
|
||||
|
||||
// AgentLockWithStrike sets Locked=true and updates LastStrikeAt atomically.
|
||||
// Used for the second strike within the 2-hour window.
|
||||
func AgentLockWithStrike(db *DB, vaultKey []byte, entryID HexID, ts int64) error {
|
||||
return agentMutate(db, vaultKey, entryID, func(vd *VaultData) {
|
||||
vd.Locked = true
|
||||
vd.LastStrikeAt = ts
|
||||
})
|
||||
}
|
||||
|
||||
// AgentLock sets the locked flag on an agent without changing LastStrikeAt.
|
||||
func AgentLock(db *DB, vaultKey []byte, entryID HexID) error {
|
||||
var e Entry
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT entry_id, data, data_level FROM entries WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
int64(entryID),
|
||||
).Scan(&e.EntryID, &e.Data, &e.DataLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataText, err := Unpack(entryKey, e.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var vd VaultData
|
||||
if err := json.Unmarshal([]byte(dataText), &vd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vd.Locked = true
|
||||
updated, err := json.Marshal(vd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packed, err := Pack(entryKey, string(updated))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Conn.Exec(`UPDATE entries SET data = ?, updated_at = ? WHERE entry_id = ?`,
|
||||
packed, time.Now().Unix(), int64(entryID))
|
||||
return err
|
||||
return agentMutate(db, vaultKey, entryID, func(vd *VaultData) {
|
||||
vd.Locked = true
|
||||
})
|
||||
}
|
||||
|
||||
// AgentUnlock clears the locked flag on an agent (PRF confirmed).
|
||||
// AgentUnlock clears Locked and resets the strike clock.
|
||||
// Owner-driven (PRF tap) — also clears LastStrikeAt so the agent gets a fresh start.
|
||||
func AgentUnlock(db *DB, vaultKey []byte, entryID HexID) error {
|
||||
var e Entry
|
||||
err := db.Conn.QueryRow(
|
||||
`SELECT entry_id, data, data_level FROM entries WHERE entry_id = ? AND deleted_at IS NULL`,
|
||||
int64(entryID),
|
||||
).Scan(&e.EntryID, &e.Data, &e.DataLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return agentMutate(db, vaultKey, entryID, func(vd *VaultData) {
|
||||
vd.Locked = false
|
||||
vd.LastStrikeAt = 0
|
||||
})
|
||||
}
|
||||
|
||||
entryKey, err := DeriveEntryKey(vaultKey, int64(e.EntryID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dataText, err := Unpack(entryKey, e.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var vd VaultData
|
||||
if err := json.Unmarshal([]byte(dataText), &vd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vd.Locked = false
|
||||
updated, err := json.Marshal(vd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packed, err := Pack(entryKey, string(updated))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = db.Conn.Exec(`UPDATE entries SET data = ?, updated_at = ? WHERE entry_id = ?`,
|
||||
packed, time.Now().Unix(), int64(entryID))
|
||||
return err
|
||||
// AgentCreateOpts is the options struct for creating an agent.
|
||||
type AgentCreateOpts struct {
|
||||
POP string // POP routing tag (commercial); "" for community
|
||||
Name string // human-readable label
|
||||
Scopes string // comma-separated scope IDs; "" or "auto" → fresh random scope
|
||||
AllAccess bool // true → bypass scope checks
|
||||
Admin bool // true → can create/edit/delete other agents
|
||||
RateLimit int // unique entries per minute; 0 = unlimited
|
||||
RateLimitHour int // unique entries per hour; 0 = unlimited
|
||||
}
|
||||
|
||||
// AgentCreate creates an agent entry. The credential token is generated client-side by the web UI.
|
||||
func AgentCreate(db *DB, vaultKey, l0 []byte, pop string, name string, scopes string, allAccess, admin bool) (*AgentData, error) {
|
||||
func AgentCreate(db *DB, vaultKey, l0 []byte, opts AgentCreateOpts) (*AgentData, error) {
|
||||
// Generate random 16-byte agent_id and scope_id
|
||||
agentID := make([]byte, 16)
|
||||
rand.Read(agentID)
|
||||
agentIDHex := hex.EncodeToString(agentID)
|
||||
|
||||
// Auto-assign scope if not provided
|
||||
scopes := opts.Scopes
|
||||
if scopes == "" || scopes == "auto" {
|
||||
scopeID := make([]byte, 16)
|
||||
rand.Read(scopeID)
|
||||
|
|
@ -720,17 +708,19 @@ func AgentCreate(db *DB, vaultKey, l0 []byte, pop string, name string, scopes st
|
|||
|
||||
// Create agent entry (NO L2 stored - agent gets it from their credential token generated client-side)
|
||||
vd := &VaultData{
|
||||
Title: name,
|
||||
Type: TypeAgent,
|
||||
AgentID: agentIDHex,
|
||||
Scopes: scopes,
|
||||
AllAccess: allAccess,
|
||||
Admin: admin,
|
||||
Title: opts.Name,
|
||||
Type: TypeAgent,
|
||||
AgentID: agentIDHex,
|
||||
Scopes: scopes,
|
||||
AllAccess: opts.AllAccess,
|
||||
Admin: opts.Admin,
|
||||
RateLimit: opts.RateLimit,
|
||||
RateLimitHour: opts.RateLimitHour,
|
||||
}
|
||||
|
||||
entry := &Entry{
|
||||
Type: TypeAgent,
|
||||
Title: name,
|
||||
Title: opts.Name,
|
||||
DataLevel: DataLevelL1,
|
||||
Scopes: ScopeOwner, // agent entries are owner-only
|
||||
VaultData: vd,
|
||||
|
|
@ -747,15 +737,15 @@ func AgentCreate(db *DB, vaultKey, l0 []byte, pop string, name string, scopes st
|
|||
return nil, err
|
||||
}
|
||||
|
||||
agent := &AgentData{
|
||||
AgentID: agentIDHex,
|
||||
Name: name,
|
||||
Scopes: scopes,
|
||||
AllAccess: allAccess,
|
||||
Admin: admin,
|
||||
}
|
||||
|
||||
return agent, nil
|
||||
return &AgentData{
|
||||
AgentID: agentIDHex,
|
||||
Name: opts.Name,
|
||||
Scopes: scopes,
|
||||
AllAccess: opts.AllAccess,
|
||||
Admin: opts.Admin,
|
||||
RateLimit: opts.RateLimit,
|
||||
RateLimitHour: opts.RateLimitHour,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -1,353 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DetectAndParse attempts to parse known password manager formats directly.
|
||||
// Returns (entries, true) if format recognized, (nil, false) if unknown.
|
||||
func DetectAndParse(content []byte) ([]VaultData, bool) {
|
||||
// If it's a zip, extract the first JSON file and parse that
|
||||
if content, ok := extractFromZip(content); ok {
|
||||
return DetectAndParse(content)
|
||||
}
|
||||
// Strip UTF-8 BOM if present (common in browser CSV exports)
|
||||
content = bytes.TrimPrefix(content, []byte{0xEF, 0xBB, 0xBF})
|
||||
// Try Bitwarden/generic JSON array with "items" key
|
||||
if entries, ok := parseBitwardenJSON(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
// Try Proton Pass JSON
|
||||
if entries, ok := parseProtonJSON(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
// Try Chrome/Firefox CSV
|
||||
if entries, ok := parseBrowserCSV(content); ok {
|
||||
AutoL2Fields(entries)
|
||||
return entries, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// extractFromZip tries to read content as a zip archive and returns the first
// JSON or CSV file found inside. Used for Proton Pass exports (zip containing JSON).
func extractFromZip(content []byte) ([]byte, bool) {
	zr, err := zip.NewReader(bytes.NewReader(content), int64(len(content)))
	if err != nil {
		return nil, false // not a zip archive at all
	}
	for _, file := range zr.File {
		lower := strings.ToLower(file.Name)
		if !strings.HasSuffix(lower, ".json") && !strings.HasSuffix(lower, ".csv") {
			continue
		}
		rc, openErr := file.Open()
		if openErr != nil {
			continue // unreadable member; keep scanning the archive
		}
		data, readErr := io.ReadAll(rc)
		rc.Close()
		if readErr != nil {
			continue
		}
		return data, true
	}
	return nil, false
}
|
||||
|
||||
// readCSV parses content with the given delimiter. Returns (records, true)
// only when the input is valid CSV with at least two rows (header plus data)
// and the header has at least three columns.
func readCSV(content []byte, delim rune) ([][]string, bool) {
	reader := csv.NewReader(bytes.NewReader(content))
	reader.Comma = delim
	reader.LazyQuotes = true
	reader.TrimLeadingSpace = true

	rows, err := reader.ReadAll()
	switch {
	case err != nil:
		return nil, false
	case len(rows) < 2:
		return nil, false // need a header plus at least one data row
	case len(rows[0]) < 3:
		return nil, false // sanity: real exports have ≥3 header columns
	}
	return rows, true
}
|
||||
|
||||
// --- Chrome CSV ---
|
||||
// Columns: name,url,username,password (Chrome)
|
||||
// Columns: url,username,password,httpRealm,formActionOrigin,guid,timeCreated,timeLastUsed,timePasswordChanged (Firefox)
|
||||
func parseBrowserCSV(content []byte) ([]VaultData, bool) {
|
||||
// Try comma first, then semicolon (European locale exports)
|
||||
records, ok := readCSV(content, ',')
|
||||
if !ok {
|
||||
records, ok = readCSV(content, ';')
|
||||
}
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
header := records[0]
|
||||
colIdx := map[string]int{}
|
||||
for i, h := range header {
|
||||
colIdx[strings.ToLower(strings.TrimSpace(h))] = i
|
||||
}
|
||||
|
||||
// Detect Chrome: name, url, username, password
|
||||
// Detect Firefox: url, username, password (+ extras)
|
||||
urlCol, hasURL := colIdx["url"]
|
||||
userCol, hasUser := colIdx["username"]
|
||||
passCol, hasPass := colIdx["password"]
|
||||
nameCol, hasName := colIdx["name"]
|
||||
// Firefox: timePasswordChanged (Unix microseconds)
|
||||
timeChangedCol, hasTimeChanged := colIdx["timepasswordchanged"]
|
||||
|
||||
if !hasURL || !hasUser || !hasPass {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var entries []VaultData
|
||||
for _, row := range records[1:] {
|
||||
if len(row) <= urlCol || len(row) <= passCol {
|
||||
continue
|
||||
}
|
||||
title := ""
|
||||
if hasName && len(row) > nameCol {
|
||||
title = row[nameCol]
|
||||
}
|
||||
if title == "" {
|
||||
title = row[urlCol]
|
||||
}
|
||||
entry := VaultData{
|
||||
Title: title,
|
||||
Type: "credential",
|
||||
Fields: []VaultField{
|
||||
{Label: "Username", Value: row[userCol], Kind: "text"},
|
||||
{Label: "Password", Value: row[passCol], Kind: "password"},
|
||||
},
|
||||
}
|
||||
if row[urlCol] != "" {
|
||||
entry.URLs = []string{row[urlCol]}
|
||||
}
|
||||
// Firefox stores timestamps as Unix microseconds
|
||||
if hasTimeChanged && len(row) > timeChangedCol && row[timeChangedCol] != "" {
|
||||
if us, err := strconv.ParseInt(row[timeChangedCol], 10, 64); err == nil && us > 0 {
|
||||
entry.SourceModified = us / 1_000_000 // microseconds → seconds
|
||||
}
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return entries, len(entries) > 0
|
||||
}
|
||||
|
||||
// --- Bitwarden JSON ---

// bitwardenExport is the top level of a Bitwarden JSON export: a flat list
// of vault items under the "items" key.
type bitwardenExport struct {
	Items []bitwardenItem `json:"items"`
}

// bitwardenItem is a single exported vault item. Type selects which optional
// sub-struct (Login, Card, Identity) is populated; type 2 (note) uses Notes.
type bitwardenItem struct {
	Name         string             `json:"name"`
	Type         int                `json:"type"` // 1=login, 2=note, 3=card, 4=identity
	Notes        string             `json:"notes"`
	RevisionDate string             `json:"revisionDate"` // RFC3339
	Login        *bitwardenLogin    `json:"login"`
	Card         *bitwardenCard     `json:"card"`
	Identity     *bitwardenIdentity `json:"identity"`
}

// bitwardenLogin holds credential data for type-1 (login) items.
type bitwardenLogin struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Totp     string `json:"totp"`
	URIs     []struct {
		URI string `json:"uri"`
	} `json:"uris"`
}

// bitwardenCard holds payment-card data for type-3 (card) items.
type bitwardenCard struct {
	CardholderName string `json:"cardholderName"`
	Number         string `json:"number"`
	ExpMonth       string `json:"expMonth"`
	ExpYear        string `json:"expYear"`
	Code           string `json:"code"` // CVV / security code
}

// bitwardenIdentity holds personal-identity data for type-4 (identity) items.
type bitwardenIdentity struct {
	FirstName  string `json:"firstName"`
	LastName   string `json:"lastName"`
	Email      string `json:"email"`
	Phone      string `json:"phone"`
	Address1   string `json:"address1"`
	City       string `json:"city"`
	State      string `json:"state"`
	PostalCode string `json:"postalCode"`
	Country    string `json:"country"`
}
|
||||
|
||||
// parseBitwardenJSON parses a Bitwarden (or compatible) JSON export: an
// object with a non-empty "items" array. Returns (entries, true) on success,
// (nil, false) when the content is not this format or holds no items.
func parseBitwardenJSON(content []byte) ([]VaultData, bool) {
	var bw bitwardenExport
	if err := json.Unmarshal(content, &bw); err != nil || len(bw.Items) == 0 {
		return nil, false
	}
	var entries []VaultData
	for _, item := range bw.Items {
		vd := VaultData{Title: item.Name, Notes: item.Notes}
		switch item.Type {
		case 1: // login
			vd.Type = "credential"
			if item.Login != nil {
				// Username/Password fields are always added, even when empty.
				vd.Fields = append(vd.Fields, VaultField{Label: "Username", Value: item.Login.Username, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Password", Value: item.Login.Password, Kind: "password"})
				if item.Login.Totp != "" {
					vd.Fields = append(vd.Fields, VaultField{Label: "TOTP Seed", Value: item.Login.Totp, Kind: "totp"})
				}
				for _, u := range item.Login.URIs {
					if u.URI != "" {
						vd.URLs = append(vd.URLs, u.URI)
					}
				}
			}
		case 2: // note
			vd.Type = "note"
			// The note body becomes a Content field; Notes is cleared so the
			// same text is not stored twice.
			vd.Fields = append(vd.Fields, VaultField{Label: "Content", Value: item.Notes, Kind: "text"})
			vd.Notes = ""
		case 3: // card
			vd.Type = "card"
			if item.Card != nil {
				// Card number and CVV are pre-flagged L2 (client-side only).
				vd.Fields = append(vd.Fields, VaultField{Label: "Cardholder", Value: item.Card.CardholderName, Kind: "text"})
				vd.Fields = append(vd.Fields, VaultField{Label: "Number", Value: item.Card.Number, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "CVV", Value: item.Card.Code, Kind: "text", L2: true})
				vd.Fields = append(vd.Fields, VaultField{Label: "Expiry", Value: item.Card.ExpMonth + "/" + item.Card.ExpYear, Kind: "text"})
			}
		case 4: // identity
			vd.Type = "identity"
			if item.Identity != nil {
				id := item.Identity
				// Only non-empty identity attributes become fields.
				addField := func(label, value string) {
					if value != "" {
						vd.Fields = append(vd.Fields, VaultField{Label: label, Value: value, Kind: "text"})
					}
				}
				addField("First Name", id.FirstName)
				addField("Last Name", id.LastName)
				addField("Email", id.Email)
				addField("Phone", id.Phone)
				addField("Address", id.Address1)
				addField("City", id.City)
				addField("State", id.State)
				addField("ZIP", id.PostalCode)
				addField("Country", id.Country)
			}
		}
		// Unrecognized type values fall through the switch with an empty
		// vd.Type but are still appended.
		// Parse revisionDate → SourceModified (unix seconds); ignored if
		// absent or malformed.
		if item.RevisionDate != "" {
			if t, err := time.Parse(time.RFC3339, item.RevisionDate); err == nil {
				vd.SourceModified = t.Unix()
			}
		}
		entries = append(entries, vd)
	}
	return entries, len(entries) > 0
}
|
||||
|
||||
// --- Proton Pass JSON ---
// Proton nests everything under item.data: type, metadata, content are all inside data.

// protonExport is the top level of a Proton Pass export: items grouped by vault.
type protonExport struct {
	Vaults map[string]protonVault `json:"vaults"`
}

// protonVault is one vault's item list.
type protonVault struct {
	Items []protonItem `json:"items"`
}

// protonItem wraps the payload (Data) with server-side timestamps.
type protonItem struct {
	Data       protonItemData `json:"data"`
	ModifyTime int64          `json:"modifyTime"` // Unix seconds
	CreateTime int64          `json:"createTime"`
}

// protonItemData carries the item type plus its metadata and content.
type protonItemData struct {
	Type     string        `json:"type"`
	Metadata protonMeta    `json:"metadata"`
	Content  protonContent `json:"content"`
}

// protonMeta holds the display name and free-form note of an item.
type protonMeta struct {
	Name string `json:"name"`
	Note string `json:"note"`
}

// protonContent is a union of per-type payload fields; only the fields for
// the item's Type are populated.
type protonContent struct {
	// login
	ItemUsername string   `json:"itemUsername"`
	ItemEmail    string   `json:"itemEmail"`
	Password     string   `json:"password"`
	Urls         []string `json:"urls"`
	TOTPUri      string   `json:"totpUri"`
	// card
	CardholderName     string `json:"cardholderName"`
	Number             string `json:"number"`
	VerificationNumber string `json:"verificationNumber"` // CVV
	ExpirationDate     string `json:"expirationDate"`
	// identity
	FullName string `json:"fullName"`
	Email    string `json:"email"`
	Phone    string `json:"phoneNumber"`
	// alias
	AliasEmail string `json:"aliasEmail"`
}
|
||||
|
||||
func parseProtonJSON(content []byte) ([]VaultData, bool) {
|
||||
var pe protonExport
|
||||
if err := json.Unmarshal(content, &pe); err != nil || len(pe.Vaults) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
var entries []VaultData
|
||||
for _, vault := range pe.Vaults {
|
||||
for _, item := range vault.Items {
|
||||
d := item.Data
|
||||
vd := VaultData{
|
||||
Title: d.Metadata.Name,
|
||||
Notes: d.Metadata.Note,
|
||||
URLs: d.Content.Urls,
|
||||
}
|
||||
// Pick best username: itemUsername, itemEmail, or email
|
||||
username := d.Content.ItemUsername
|
||||
if username == "" {
|
||||
username = d.Content.ItemEmail
|
||||
}
|
||||
switch d.Type {
|
||||
case "login":
|
||||
vd.Type = "credential"
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Username", Value: username, Kind: "text"})
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Password", Value: d.Content.Password, Kind: "password"})
|
||||
if d.Content.TOTPUri != "" {
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "TOTP Seed", Value: d.Content.TOTPUri, Kind: "totp"})
|
||||
}
|
||||
case "creditCard":
|
||||
vd.Type = "card"
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Cardholder", Value: d.Content.CardholderName, Kind: "text"})
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Number", Value: d.Content.Number, Kind: "text", L2: true})
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "CVV", Value: d.Content.VerificationNumber, Kind: "text", L2: true})
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Expiry", Value: d.Content.ExpirationDate, Kind: "text"})
|
||||
case "identity":
|
||||
vd.Type = "identity"
|
||||
addF := func(l, v string) {
|
||||
if v != "" {
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: l, Value: v, Kind: "text"})
|
||||
}
|
||||
}
|
||||
addF("Full Name", d.Content.FullName)
|
||||
addF("Email", d.Content.Email)
|
||||
addF("Phone", d.Content.Phone)
|
||||
case "alias":
|
||||
continue // Proton-specific email alias — not a vault entry
|
||||
case "note":
|
||||
vd.Type = "note"
|
||||
vd.Fields = append(vd.Fields, VaultField{Label: "Content", Value: d.Metadata.Note, Kind: "text"})
|
||||
vd.Notes = ""
|
||||
default:
|
||||
vd.Type = "note"
|
||||
}
|
||||
if item.ModifyTime > 0 {
|
||||
vd.SourceModified = item.ModifyTime
|
||||
}
|
||||
entries = append(entries, vd)
|
||||
}
|
||||
}
|
||||
return entries, len(entries) > 0
|
||||
}
|
||||
|
|
@ -1,158 +0,0 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDetectAndParse_ChromeCSV(t *testing.T) {
|
||||
csv := "name,url,username,password\nGitHub,https://github.com,octocat,hunter2\nAWS,https://aws.amazon.com,admin,s3cret\n"
|
||||
entries, ok := DetectAndParse([]byte(csv))
|
||||
if !ok {
|
||||
t.Fatal("should detect Chrome CSV")
|
||||
}
|
||||
if len(entries) != 2 {
|
||||
t.Fatalf("expected 2 entries, got %d", len(entries))
|
||||
}
|
||||
if entries[0].Title != "GitHub" {
|
||||
t.Errorf("title = %q", entries[0].Title)
|
||||
}
|
||||
if entries[0].Type != "credential" {
|
||||
t.Errorf("type = %q", entries[0].Type)
|
||||
}
|
||||
if len(entries[0].URLs) == 0 || entries[0].URLs[0] != "https://github.com" {
|
||||
t.Errorf("URL not parsed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_FirefoxCSV(t *testing.T) {
|
||||
csv := "url,username,password,httpRealm,formActionOrigin,guid,timeCreated,timeLastUsed,timePasswordChanged\nhttps://example.com,user@example.com,pass123,,,,,,1700000000000000\n"
|
||||
entries, ok := DetectAndParse([]byte(csv))
|
||||
if !ok {
|
||||
t.Fatal("should detect Firefox CSV")
|
||||
}
|
||||
if len(entries) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(entries))
|
||||
}
|
||||
// Firefox stores timePasswordChanged as microseconds
|
||||
if entries[0].SourceModified != 1700000000 {
|
||||
t.Errorf("SourceModified = %d, want 1700000000", entries[0].SourceModified)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_BitwardenJSON(t *testing.T) {
|
||||
json := `{"items":[{"name":"GitHub","type":1,"login":{"username":"octocat","password":"p@ss","uris":[{"uri":"https://github.com"}]},"revisionDate":"2024-01-15T10:00:00Z"}]}`
|
||||
entries, ok := DetectAndParse([]byte(json))
|
||||
if !ok {
|
||||
t.Fatal("should detect Bitwarden JSON")
|
||||
}
|
||||
if len(entries) != 1 {
|
||||
t.Fatalf("expected 1 entry, got %d", len(entries))
|
||||
}
|
||||
if entries[0].Title != "GitHub" {
|
||||
t.Errorf("title = %q", entries[0].Title)
|
||||
}
|
||||
if entries[0].SourceModified == 0 {
|
||||
t.Error("SourceModified should be parsed from revisionDate")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_BitwardenCard(t *testing.T) {
|
||||
json := `{"items":[{"name":"Amex","type":3,"card":{"cardholderName":"Johan","number":"378282246310005","code":"1234","expMonth":"09","expYear":"28"}}]}`
|
||||
entries, ok := DetectAndParse([]byte(json))
|
||||
if !ok {
|
||||
t.Fatal("should detect Bitwarden card")
|
||||
}
|
||||
if entries[0].Type != "card" {
|
||||
t.Errorf("type = %q, want card", entries[0].Type)
|
||||
}
|
||||
// Card number and CVV should be auto-flagged L2
|
||||
for _, f := range entries[0].Fields {
|
||||
if f.Label == "Number" && !f.L2 {
|
||||
t.Error("card number should be L2")
|
||||
}
|
||||
if f.Label == "CVV" && !f.L2 {
|
||||
t.Error("CVV should be L2")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAndParse_unknown_format(t *testing.T) {
|
||||
_, ok := DetectAndParse([]byte("this is not a known format"))
|
||||
if ok {
|
||||
t.Error("should not detect unknown format")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoL2Fields_labels(t *testing.T) {
|
||||
entries := []VaultData{
|
||||
{
|
||||
Title: "Bank",
|
||||
Fields: []VaultField{
|
||||
{Label: "Username", Value: "user", Kind: "text"},
|
||||
{Label: "Card Number", Value: "4111111111111111", Kind: "text"},
|
||||
{Label: "CVV", Value: "123", Kind: "text"},
|
||||
{Label: "SSN", Value: "123-45-6789", Kind: "text"},
|
||||
{Label: "API Key", Value: "sk_live_abc", Kind: "text"},
|
||||
},
|
||||
},
|
||||
}
|
||||
AutoL2Fields(entries)
|
||||
|
||||
expectations := map[string]bool{
|
||||
"Username": false,
|
||||
"Card Number": true,
|
||||
"CVV": true,
|
||||
"SSN": true,
|
||||
"API Key": false,
|
||||
}
|
||||
for _, f := range entries[0].Fields {
|
||||
want, ok := expectations[f.Label]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if f.L2 != want {
|
||||
t.Errorf("field %q: L2=%v, want %v", f.Label, f.L2, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoL2Fields_title_match_marks_all(t *testing.T) {
|
||||
entries := []VaultData{
|
||||
{
|
||||
Title: "Coinbase Wallet",
|
||||
Fields: []VaultField{
|
||||
{Label: "Email", Value: "me@example.com", Kind: "text"},
|
||||
{Label: "Password", Value: "secret", Kind: "password"},
|
||||
},
|
||||
},
|
||||
}
|
||||
AutoL2Fields(entries)
|
||||
|
||||
for _, f := range entries[0].Fields {
|
||||
if !f.L2 {
|
||||
t.Errorf("field %q should be L2 (title matched crypto exchange)", f.Label)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutoL2Fields_multilingual(t *testing.T) {
|
||||
entries := []VaultData{
|
||||
{
|
||||
Title: "Docs",
|
||||
Fields: []VaultField{
|
||||
{Label: "Paspoort", Value: "NL12345", Kind: "text"}, // Dutch
|
||||
{Label: "Führerschein", Value: "DE12345", Kind: "text"}, // German
|
||||
{Label: "身份证", Value: "CN12345", Kind: "text"}, // Chinese
|
||||
{Label: "パスポート", Value: "JP12345", Kind: "text"}, // Japanese
|
||||
{Label: "PESEL", Value: "PL12345", Kind: "text"}, // Polish
|
||||
},
|
||||
},
|
||||
}
|
||||
AutoL2Fields(entries)
|
||||
|
||||
for _, f := range entries[0].Fields {
|
||||
if !f.L2 {
|
||||
t.Errorf("field %q should be auto-detected as L2", f.Label)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,142 +0,0 @@
|
|||
package lib
|
||||
|
||||
import "strings"
|
||||
|
||||
// L2Labels contains substrings that mark a field as L2 (client-side encrypted
// only) during import. Matching is case-insensitive: if any substring appears
// in a field label, that field is flagged L2.
//
// L2 = things an AI agent should NEVER need. Personal identity, payment cards,
// government IDs. NOT API keys, SSH keys, TOTP — those are L1 (agent-readable).
//
// Contributing: add new terms anywhere in the list. Group by language or
// category, keep entries lowercase, and include a comment for the language.
var L2Labels = []string{
	// Card / payment
	// NOTE(review): "csv" also substring-matches fields literally labelled
	// "CSV" (comma-separated values); presumably intended as "card security
	// value" — confirm it is not over-matching.
	"cvv", "cvc", "csv", "security code", "card number", "card no",
	"pin code", "pin-code",

	// Banking
	"routing number", "account number", "iban", "swift", "sort code",

	// Government ID — English
	"ssn", "social security", "passport", "driver license", "driver's license",
	"driving license", "driving licence", "national id", "id card", "id number",
	"tax id", "identification number",

	// Dutch — BSN = burgerservicenummer
	"bsn", "burgerservicenummer", "rijbewijs", "paspoort", "identiteitskaart", "identiteitsbewijs",

	// German — SVN = Sozialversicherungsnummer, StID = Steuer-ID
	"sozialversicherungsnummer", "steuer-id", "steuernummer",
	"führerschein", "fuhrerschein", "sozialversicherung", "reisepass", "personalausweis",

	// French — NIR = numéro d'inscription au répertoire, CNI = carte nationale d'identité
	"nir", "cni", "numéro de sécurité", "numero de securite",
	"permis de conduire", "carte d'identit", "carte d identit", "passeport",

	// Spanish — DNI, NIE, NIF, CURP (Mexico)
	"dni", "nie", "nif", "curp",
	"licencia de conducir", "seguro social", "pasaporte", "tarjeta de identidad", "cédula", "cedula",

	// Portuguese — CPF, CNH = carteira nacional de habilitação, RNE = registro nacional de estrangeiros
	"cpf", "cnh", "rne",
	"carteira de motorista", "carteira de identidade", "passaporte",

	// Italian — CF = codice fiscale, tessera sanitaria
	"codice fiscale", "tessera sanitaria",
	"patente di guida", "passaporto", "carta d'identit", "carta d identit",

	// Chinese — 身份证号 = ID number, 社会保障号 = social security number
	"身份证", "护照", "驾照", "驾驶证", "社保", "社会保障号", "居民身份",

	// Japanese — マイナンバー = My Number, 運転免許証 = driver's license
	"パスポート", "免許", "マイナンバー", "運転免許証", "住民票",

	// Korean — 주민등록번호 = resident registration number
	"여권", "운전면허", "주민등록", "주민등록번호", "외국인등록",

	// Russian — ИНН = tax ID, СНИЛС = social insurance, ВУ = driver's license
	"паспорт", "водительск", "снилс", "инн",

	// Arabic — رقم الهوية = ID number, جواز سفر = passport
	"جواز سفر", "رخصة قيادة", "بطاقة هوية", "رقم الهوية",

	// Hindi — PAN = permanent account number, Aadhaar
	"पासपोर्ट", "आधार", "लाइसेंस", "pan card",

	// Turkish — TC Kimlik = national ID number
	"pasaport", "ehliyet", "kimlik numar", "tc kimlik", "nüfus",

	// Polish — PESEL = national ID, NIP = tax ID, dowód = ID card
	"pesel", "nip",
	"paszport", "prawo jazdy", "dowód osobisty", "dowod osobisty",

	// Swedish — pass = passport, samordningsnummer = coordination number
	"körkort", "personnummer", "samordningsnummer",

	// Thai — บัตรประชาชน = ID card, หนังสือเดินทาง = passport, ใบขับขี่ = driver's license
	"บัตรประชาชน", "หนังสือเดินทาง", "ใบขับขี่",

	// Vietnamese — CMND/CCCD = citizen ID, hộ chiếu = passport, GPLX = driver's license
	"cmnd", "cccd", "hộ chiếu", "ho chieu",
}
|
||||
|
||||
// L2Titles contains substrings matched against entry titles. If an entry's
// title matches, ALL fields in that entry are flagged L2.
// These are things a human needs but an agent never would.
var L2Titles = []string{
	// Recovery / backup codes — human-only fallback
	"backup code", "recovery code", "recovery key", "backup key",
	"restore code", "restore key", "reset code",

	// Crypto wallet seeds — human-only
	"seed phrase", "mnemonic", "recovery phrase", "wallet seed",

	// Pairing codes — one-time human setup
	"pairing code", "pairing key",

	// Crypto exchanges & wallets — entire record is sensitive
	"coinbase", "binance", "kraken", "gemini", "bitstamp", "bitfinex",
	"crypto.com", "kucoin", "bybit", "okx", "gate.io", "huobi", "htx",
	"bitget", "mexc", "upbit", "bithumb",
	// NOTE(review): "aa.com" (American Airlines) sits inside the crypto
	// exchange group — confirm it is intentional and, if so, move it under
	// its own comment.
	"aa.com",
	"metamask", "phantom", "ledger", "trezor", "exodus", "trust wallet",
	"electrum", "myetherwallet", "blockchain.com",
}
|
||||
|
||||
// AutoL2Fields scans all fields in each VaultData and sets L2=true if the
|
||||
// field label or entry title matches a sensitive pattern. Called after import.
|
||||
func AutoL2Fields(entries []VaultData) {
|
||||
for i := range entries {
|
||||
// Check title — if it matches, mark ALL fields L2
|
||||
titleLower := strings.ToLower(entries[i].Title)
|
||||
titleMatch := false
|
||||
for _, pat := range L2Titles {
|
||||
if strings.Contains(titleLower, pat) {
|
||||
titleMatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if titleMatch {
|
||||
for j := range entries[i].Fields {
|
||||
entries[i].Fields[j].L2 = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Check individual field labels
|
||||
for j := range entries[i].Fields {
|
||||
if entries[i].Fields[j].L2 {
|
||||
continue
|
||||
}
|
||||
lower := strings.ToLower(entries[i].Fields[j].Label)
|
||||
for _, pat := range L2Labels {
|
||||
if strings.Contains(lower, pat) {
|
||||
entries[i].Fields[j].L2 = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -124,7 +124,8 @@ type VaultData struct {
|
|||
AllowedIPs string `json:"allowed_ips,omitempty"` // comma-separated CIDRs or FQDNs
|
||||
RateLimit int `json:"rate_limit,omitempty"` // max unique entries per minute; 0 = unlimited
|
||||
RateLimitHour int `json:"rate_limit_hour,omitempty"` // max unique entries per hour; 0 = unlimited
|
||||
Locked bool `json:"locked,omitempty"` // true = rate limit hit, requires PRF unlock
|
||||
Locked bool `json:"locked,omitempty"` // true = locked after second strike, owner unlock required
|
||||
LastStrikeAt int64 `json:"last_strike_at,omitempty"` // unix ts of most recent hour-limit hit; second strike within 2h triggers lock
|
||||
|
||||
// Scope-specific fields (only present when type = "scope")
|
||||
ScopeID string `json:"scope_id,omitempty"` // 32-char hex (16 bytes)
|
||||
|
|
@ -157,6 +158,10 @@ type Entry struct {
|
|||
}
|
||||
|
||||
// AgentData is the in-memory representation of an agent after decrypting its entry.
|
||||
//
|
||||
// VETO: this struct must NEVER hold L2 or L3 plaintext. The agent's L2 key is
|
||||
// the agent's own — it lives in the agent's CVT credential (browser/CLI side)
|
||||
// and is never carried into the server.
|
||||
type AgentData struct {
|
||||
AgentID string // 32-char hex
|
||||
Name string
|
||||
|
|
@ -166,8 +171,8 @@ type AgentData struct {
|
|||
AllowedIPs string // comma-separated CIDRs or FQDNs; empty = not set yet (first contact fills it)
|
||||
RateLimit int // max unique entries per minute; 0 = unlimited
|
||||
RateLimitHour int // max unique entries per hour; 0 = unlimited
|
||||
Locked bool // true = rate limit hit, requires PRF unlock
|
||||
L2 []byte // 16-byte key for agent re-encryption (L1-encrypted in DB)
|
||||
Locked bool // true = locked after second strike, owner unlock required
|
||||
LastStrikeAt int64 // unix ts of most recent hour-limit hit; second strike within 2h triggers lock
|
||||
EntryID HexID // entry ID of the agent record (for updating AllowedIPs on first contact)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,138 @@
|
|||
package lib
|
||||
|
||||
// WL3 (Wrapped Layer-3) credential storage.
|
||||
//
|
||||
// Each credential is one JSON file at:
|
||||
//
|
||||
// <baseDir>/<firstByteHex>/<fullP1Hex>.json
|
||||
//
|
||||
// e.g. WL3/ab/ab2f7c8d9e1f4a3b.json
|
||||
//
|
||||
// The file holds the wrapped L3 key and the metadata needed to route a login
|
||||
// to the correct vault and POP. It contains NO personal data — no customer
|
||||
// id, no name, no email — so the WL3 corpus is GDPR-out-of-scope by design.
|
||||
// The link from a credential to a human lives only in the central admin DB.
|
||||
//
|
||||
// The on-disk format is identical between community and commercial editions.
|
||||
// Only the sync layer (commercial) differs.
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ErrWL3NotFound is returned by WL3Read when the credential file is missing.
// Compare with errors.Is.
var ErrWL3NotFound = errors.New("wl3: credential not found")

// WL3Entry is the on-disk JSON shape for a single credential.
//
// Field names are lowercase to match the agreed schema. wrapped_l3,
// credential_id, and public_key are base64url-encoded (no padding).
// p1 and l0 are hex-encoded.
type WL3Entry struct {
	P1           string `json:"p1"`            // 16 hex chars (8 bytes)
	L0           string `json:"l0"`            // 8 hex chars (4 bytes)
	WrappedL3    string `json:"wrapped_l3"`    // base64url, no padding
	CredentialID string `json:"credential_id"` // base64url, no padding
	PublicKey    string `json:"public_key"`    // base64url, no padding
	HomePOP      string `json:"home_pop"`      // e.g. "uk1", empty in community
	CreatedAt    int64  `json:"created_at"`    // unix seconds
}
|
||||
|
||||
// WL3Path returns the absolute path for a credential file given its P1.
// p1 must be exactly 8 bytes; the first byte's hex digits form the shard
// directory (e.g. WL3/ab/ab2f7c8d9e1f4a3b.json).
func WL3Path(baseDir string, p1 []byte) (string, error) {
	const p1Len = 8
	if len(p1) != p1Len {
		return "", fmt.Errorf("wl3: P1 must be 8 bytes, got %d", len(p1))
	}
	hexName := hex.EncodeToString(p1)
	return filepath.Join(baseDir, hexName[:2], hexName+".json"), nil
}
|
||||
|
||||
// WL3Write atomically writes a credential file. The shard directory is
// created if missing. If a file already exists at the path, the write is a
// no-op (collisions are practically impossible at 8-byte P1, and we never
// edit credentials in place — see SECURITY.md).
//
// Returns an error if the file exists with different content (would indicate
// a P1 collision or corruption — both load-bearing security failures).
func WL3Write(baseDir string, entry *WL3Entry) error {
	if entry == nil {
		return errors.New("wl3: nil entry")
	}
	// P1 must be exactly 8 bytes, i.e. 16 hex characters.
	if len(entry.P1) != 16 {
		return fmt.Errorf("wl3: P1 hex must be 16 chars, got %d", len(entry.P1))
	}
	p1Bytes, err := hex.DecodeString(entry.P1)
	if err != nil {
		return fmt.Errorf("wl3: P1 not valid hex: %w", err)
	}
	path, err := WL3Path(baseDir, p1Bytes)
	if err != nil {
		return err
	}

	// Marshal first so the existing-file comparison below sees the exact
	// bytes that would be written.
	newBytes, err := json.MarshalIndent(entry, "", "  ")
	if err != nil {
		return fmt.Errorf("wl3: marshal: %w", err)
	}

	// If a file already exists, refuse to overwrite unless content is identical.
	// (A read error other than not-exist falls through to the write path.)
	if existing, err := os.ReadFile(path); err == nil {
		if bytesEqual(existing, newBytes) {
			return nil // idempotent
		}
		return fmt.Errorf("wl3: refusing to overwrite existing credential at %s (P1 collision or corruption)", path)
	}

	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return fmt.Errorf("wl3: mkdir: %w", err)
	}

	// Write-to-temp then rename for atomicity; the temp file is removed on
	// rename failure so no partial file is left behind.
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, newBytes, 0o600); err != nil {
		return fmt.Errorf("wl3: write tmp: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		_ = os.Remove(tmp)
		return fmt.Errorf("wl3: rename: %w", err)
	}
	return nil
}
|
||||
|
||||
// WL3Read loads a credential file by P1.
|
||||
func WL3Read(baseDir string, p1 []byte) (*WL3Entry, error) {
|
||||
path, err := WL3Path(baseDir, p1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil, ErrWL3NotFound
|
||||
}
|
||||
return nil, fmt.Errorf("wl3: read: %w", err)
|
||||
}
|
||||
var entry WL3Entry
|
||||
if err := json.Unmarshal(data, &entry); err != nil {
|
||||
return nil, fmt.Errorf("wl3: parse %s: %w", path, err)
|
||||
}
|
||||
return &entry, nil
|
||||
}
|
||||
|
||||
// bytesEqual reports whether a and b have identical contents. nil and empty
// slices compare equal, matching the original loop's behavior.
//
// The hand-rolled byte loop is replaced with a string comparison: the Go
// compiler recognizes string(x) == string(y) and compares the slices without
// allocating, so this stays allocation-free while avoiding a "bytes" import.
func bytesEqual(a, b []byte) bool {
	return string(a) == string(b)
}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
package lib
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWL3Path_ShardingAndShape(t *testing.T) {
|
||||
p1, _ := hex.DecodeString("ab2f7c8d9e1f4a3b")
|
||||
got, err := WL3Path("/var/wl3", p1)
|
||||
if err != nil {
|
||||
t.Fatalf("WL3Path: %v", err)
|
||||
}
|
||||
want := filepath.Join("/var/wl3", "ab", "ab2f7c8d9e1f4a3b.json")
|
||||
if got != want {
|
||||
t.Errorf("path = %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWL3Path_RejectsBadLength(t *testing.T) {
|
||||
if _, err := WL3Path("/x", []byte{1, 2, 3}); err == nil {
|
||||
t.Error("expected error for 3-byte P1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWL3Write_Read_RoundTrip(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
entry := &WL3Entry{
|
||||
P1: "ab2f7c8d9e1f4a3b",
|
||||
L0: "12345678",
|
||||
WrappedL3: "d3JhcHBlZF9sM19ieXRlcw",
|
||||
CredentialID: "Y3JlZF9pZF9ieXRlcw",
|
||||
PublicKey: "cHViX2tleV9ieXRlcw",
|
||||
HomePOP: "uk1",
|
||||
CreatedAt: 1712534400,
|
||||
}
|
||||
if err := WL3Write(dir, entry); err != nil {
|
||||
t.Fatalf("write: %v", err)
|
||||
}
|
||||
|
||||
// File should land at the sharded path.
|
||||
expected := filepath.Join(dir, "ab", "ab2f7c8d9e1f4a3b.json")
|
||||
if _, err := os.Stat(expected); err != nil {
|
||||
t.Fatalf("expected file at %s, stat err: %v", expected, err)
|
||||
}
|
||||
|
||||
p1Bytes, _ := hex.DecodeString(entry.P1)
|
||||
loaded, err := WL3Read(dir, p1Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("read: %v", err)
|
||||
}
|
||||
if loaded.L0 != entry.L0 || loaded.WrappedL3 != entry.WrappedL3 ||
|
||||
loaded.CredentialID != entry.CredentialID || loaded.PublicKey != entry.PublicKey ||
|
||||
loaded.HomePOP != entry.HomePOP || loaded.CreatedAt != entry.CreatedAt {
|
||||
t.Errorf("round-trip mismatch: got %+v", loaded)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWL3Read_NotFound(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
p1, _ := hex.DecodeString("ffffffffffffffff")
|
||||
_, err := WL3Read(dir, p1)
|
||||
if !errors.Is(err, ErrWL3NotFound) {
|
||||
t.Errorf("expected ErrWL3NotFound, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWL3Write_IdempotentOnIdenticalContent(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
entry := &WL3Entry{
|
||||
P1: "0011223344556677", L0: "deadbeef",
|
||||
WrappedL3: "x", CredentialID: "y", PublicKey: "z",
|
||||
HomePOP: "uk1", CreatedAt: 1,
|
||||
}
|
||||
if err := WL3Write(dir, entry); err != nil {
|
||||
t.Fatalf("first write: %v", err)
|
||||
}
|
||||
if err := WL3Write(dir, entry); err != nil {
|
||||
t.Errorf("second write of identical content should be no-op, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWL3Write_RefusesOverwriteWithDifferentContent(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
entry := &WL3Entry{
|
||||
P1: "0011223344556677", L0: "deadbeef",
|
||||
WrappedL3: "x", CredentialID: "y", PublicKey: "z",
|
||||
HomePOP: "uk1", CreatedAt: 1,
|
||||
}
|
||||
if err := WL3Write(dir, entry); err != nil {
|
||||
t.Fatalf("first write: %v", err)
|
||||
}
|
||||
entry2 := *entry
|
||||
entry2.WrappedL3 = "different"
|
||||
if err := WL3Write(dir, &entry2); err == nil {
|
||||
t.Error("expected error when overwriting with different content")
|
||||
}
|
||||
}
|
||||
Loading…
Reference in New Issue