61 lines
1.4 KiB
Go
61 lines
1.4 KiB
Go
package lib
|
|
|
|
import (
	"fmt"
	"os"
	"strconv"
)
|
|
|
|
// Config holds application configuration.
//
// All values are populated from environment variables by LoadConfig;
// the defaults noted below apply when the corresponding variable is unset.
type Config struct {
	Port            string // HTTP listen port, from PORT; default "1984"
	DataDir         string // directory for vault DB files, from DATA_DIR; default "."
	Mode            string // from VAULT_MODE: "self-hosted" (default) or "hosted"
	FireworksAPIKey string // LLM API key, from LLM_API_KEY (legacy fallback: FIREWORKS_API_KEY)
	LLMBaseURL      string // OpenAI-compatible base URL, from LLM_BASE_URL; defaults to Fireworks
	LLMModel        string // model identifier, from LLM_MODEL; default llama-v3p3-70b-instruct
	SessionTTL      int64  // session lifetime in seconds; default 86400 (24 hours)
}
|
|
|
|
// LoadConfig loads configuration from environment variables.
|
|
func LoadConfig() (*Config, error) {
|
|
port := os.Getenv("PORT")
|
|
if port == "" {
|
|
port = "1984"
|
|
}
|
|
|
|
mode := os.Getenv("VAULT_MODE")
|
|
if mode == "" {
|
|
mode = "self-hosted"
|
|
}
|
|
|
|
dataDir := os.Getenv("DATA_DIR")
|
|
if dataDir == "" {
|
|
dataDir = "."
|
|
}
|
|
|
|
fireworksKey := os.Getenv("LLM_API_KEY")
|
|
if fireworksKey == "" {
|
|
fireworksKey = os.Getenv("FIREWORKS_API_KEY") // legacy
|
|
}
|
|
llmModel := os.Getenv("LLM_MODEL")
|
|
if llmModel == "" {
|
|
llmModel = "accounts/fireworks/models/llama-v3p3-70b-instruct"
|
|
}
|
|
|
|
llmBaseURL := os.Getenv("LLM_BASE_URL")
|
|
if llmBaseURL == "" {
|
|
llmBaseURL = "https://api.fireworks.ai/inference/v1"
|
|
}
|
|
|
|
sessionTTL := int64(86400) // 24 hours default
|
|
|
|
return &Config{
|
|
Port: port,
|
|
DataDir: dataDir,
|
|
Mode: mode,
|
|
FireworksAPIKey: fireworksKey,
|
|
LLMBaseURL: llmBaseURL,
|
|
LLMModel: llmModel,
|
|
SessionTTL: sessionTTL,
|
|
}, nil
|
|
}
|