diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..32270b2 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,55 @@ +# ============================================================================== +# APPLICATION +# ============================================================================== +APP_ENV=development +SERVER_PORT=8080 +SERVER_READ_TIMEOUT=10s +SERVER_WRITE_TIMEOUT=10s + +# ============================================================================== +# DATABASE (PostgreSQL) +# ============================================================================== +DB_HOST=localhost +DB_PORT=5432 +DB_USER=aurganize +DB_PASSWORD=aurganize_dev_pass_change_in_production +DB_NAME=aurganize_v62 +DB_SSLMODE=disable + +# Connection Pool +DB_MAX_OPEN_CONNS=25 +DB_MAX_IDLE_CONNS=5 +DB_CONN_MAX_LIFETIME=5m + +# ============================================================================== +# JWT AUTHENTICATION +# ============================================================================== +# IMPORTANT: Change these secrets in production! +# Generate with: openssl rand -base64 32 +JWT_ACCESS_SECRET=your-super-secret-access-key-min-32-chars-change-in-production +JWT_REFRESH_SECRET=your-super-secret-refresh-key-min-32-chars-must-be-different +JWT_ACCESS_EXPIRY=15m +JWT_REFRESH_EXPIRY=168h +JWT_ISSUER=aurganize-v62 + +# ============================================================================== +# REDIS (Caching & Sessions) +# ============================================================================== +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD= +REDIS_DB=0 + +# ============================================================================== +# NATS (Event Messaging) +# ============================================================================== +NATS_URL=nats://localhost:4222 + +# ============================================================================== +# MINIO (S3-Compatible Storage) +# ============================================================================== +MINIO_ENDPOINT=localhost:9000 +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +MINIO_BUCKET=aurganize +MINIO_USE_SSL=false \ No newline at end of file diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go new file mode 100644 index 0000000..fa1ab43 --- /dev/null +++ b/backend/cmd/api/main.go @@ -0,0 +1,313 @@ +package main + +import ( + "context" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/creativenoz/aurganize-v62/backend/internal/config" + "github.com/creativenoz/aurganize-v62/backend/pkg/logger" + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" + "github.com/rs/zerolog/log" +) + +func main() { + // ========================================================================= + // Loading Configuration + // ========================================================================= + cfg, err := config.Load() + if err != nil { + // we are not using logger here, since we need config information to set the log level + fmt.Fprintf(os.Stderr, "Failed to load configurations : %v\n", err) + // hence when config load fails we exit application, cause have not point in continuing further + os.Exit(1) + } + // ========================================================================= + // Initializing Logger + // ========================================================================= + logger.Init(cfg.Server.Environment) + log.Info(). + Str("Version", "0.6.2"). 
+ Str("environment", cfg.Server.Environment). + Msg("Starting Aurganize v6.2 API server") + + // ========================================================================= + // Create Echo Instance + // ========================================================================= + e := echo.New() + e.HideBanner = true + e.HidePort = true + + e.HTTPErrorHandler = customHTTPErrorHandler // we are using a custom error handler + + e.Server.ReadTimeout = cfg.Server.ReadTimeout + e.Server.WriteTimeout = cfg.Server.WriteTimeout + + log.Info().Msg("Echo server instance created") + // ========================================================================= + // Middleware Pipeline + // ========================================================================= + + // Setting safe recover middleware + e.Use(middleware.Recover()) + // Middleware catches panic + // Returns 500 Internal Server Error + // Server keeps running + // ------------------------------------------------------------------------- + // Setting request ID middleware + e.Use(middleware.RequestID()) + // Trace request through entire system + // Link frontend error to backend logs + // This adds a header : X-Request-ID: abc-123-def-456 + // ------------------------------------------------------------------------ + // Setting Logger format + e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ + Format: `{"time":"${time_rfc3339}","method":"${method}","uri":"${uri}",` + + `"status":${status},"latency_ms":${latency_ms},"error":"${error}"}` + "\n", + Output: log.Logger, + })) + // We are setting a custom log format, which is consisten with our logger format + // { + // "time": "2025-11-26T10:30:45Z", + // "method": "POST", + // "uri": "/api/v1/login", + // "status": 200, + // "latency_ms": 45, + // "error": "" + // } + // ----------------------------------------------------------------------- + // Setting CORS (Cross-Origin Resource Sharing) middleware + e.Use(middleware.CORSWithConfig(middleware.CORSConfig{ + AllowOrigins: []string{ + "http://localhost:5173", // (Development) Svelte dev server : this is the port suggested to be used with front-end + "http://localhost:3000", // (Developement) Alternative dev port : this is an alternative port kept aside + "https://app.aurganize.com", // (Production) Production frontend : we can use this subdomain itself for front-end + }, + AllowMethods: []string{ + http.MethodGet, + http.MethodPost, + http.MethodPut, + http.MethodDelete, + http.MethodPatch, + http.MethodOptions, + }, + AllowHeaders: []string{ + "Origin", + "Content-Type", + "Accept", + "Authorization", + "X-Request-ID", + }, + + AllowCredentials: true, // Not sure about why are using this option + + MaxAge: 3600, // 1 hour in seconds + })) + // Prevents malicious sites from calling your API + // ---------------------------------------------------------------------- + // Setting Security Headers middleware + e.Use(middleware.SecureWithConfig(middleware.SecureConfig{ + XSSProtection: "1; mode=block", + ContentTypeNosniff: "nosniff", + XFrameOptions: "SAMEORIGIN", + HSTSMaxAge: 31536000, + HSTSExcludeSubdomains: false, + ContentSecurityPolicy: "default-src 'self'", + })) + // X-XSS-Protection: + // - Blocks cross-site scripting attacks + // - Browser detects XSS and blocks page + + // X-Content-Type-Options: nosniff: + // - Prevents MIME-type sniffing + // - Browser trusts Content-Type header + // - Prevents executing scripts as HTML + + // X-Frame-Options: SAMEORIGIN: + // - Prevents clickjacking + // - Page can't be embedded in iframe 
+	// - Protects against UI redress attacks
+
+	// Strict-Transport-Security (HSTS):
+	// - Forces HTTPS for 1 year
+	// - Prevents downgrade attacks
+	// - Can't be disabled by user
+
+	// Content-Security-Policy:
+	// - Only load resources from same origin
+	// - Prevents loading malicious scripts
+	// - Additional layer of XSS protection
+	// -------------------------------------------------------------------
+	// Setting Gzip compression middleware
+	e.Use(middleware.Gzip())
+
+	// TODO : Rate Limiting middleware (planning to use redis for custom rate limiter)
+
+	log.Info().Msg("Middleware configured")
+
+	// =========================================================================
+	// Routes
+	// =========================================================================
+
+	e.GET("/health", healthCheckHandler(cfg)) // (Public - health check)
+	api := e.Group("/api/v6.2")
+
+	api.GET("/ping", func(c echo.Context) error { // (Public - connectivity test)
+		return c.JSON(http.StatusOK, map[string]string{
+			"message":   "pika pikaaa",
+			"timestamp": time.Now().UTC().Format(time.RFC3339),
+			"version":   "0.6.2",
+		})
+	})
+
+	log.Info().Msg("Routes configured")
+	// =========================================================================
+	// Start Server in a goroutine
+	// =========================================================================
+
+	serverAddr := fmt.Sprintf(":%s", cfg.Server.Port)
+
+	go func() {
+		log.Info().
+			Str("address", serverAddr).
+			Str("environment", cfg.Server.Environment).
+			Msg("Server starting")
+
+		if err := e.Start(serverAddr); err != nil && err != http.ErrServerClosed {
+			log.Fatal().
+				Err(err).
+				Msg("Failed to start server")
+		}
+	}()
+	// =========================================================================
+	// Shutdown Logic
+	// =========================================================================
+
+	quit := make(chan os.Signal, 1)
+	// Creates a channel named quit that can carry values of type os.Signal.
+	// The 1 means it's a buffered channel with capacity 1 → it can hold one signal without blocking.
+	// This channel will be used to receive OS signals like Ctrl+C or kill.
+
+	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)
+	// Whenever the process receives any of these signals, send them into the quit channel.
+	// os.Interrupt → typically the signal sent when you press Ctrl+C in the terminal.
+	// syscall.SIGTERM → the "please terminate" signal, used by process managers / Docker / Kubernetes.
+
+	<-quit // Blocks until we get a signal in this channel
+	// This is a receive operation on the channel.
+	// The code blocks here and does nothing until:
+	// the OS sends os.Interrupt or SIGTERM,
+	// which signal.Notify pushes into quit.
+	// When a signal arrives, <-quit unblocks and the program continues.
+
+	log.Info().Msg("Shutting down server gracefully...")
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	// Creates a context with a timeout of 10 seconds.
+	// This context is passed to e.Shutdown(ctx)
+	// Echo will have at most 10 seconds to shut down gracefully.
+	// After 10 seconds, the context is cancelled, and shutdown will be forced.
+
+	if err := e.Shutdown(ctx); err != nil {
+		log.Error().
+			Err(err).
+			Msg("Server forced to shutdown, graceful shutdown failed")
+	}
+	// Calls Echo's Shutdown method with your timeout context.
+	// e.Shutdown(ctx):
+	// -- stops accepting new requests,
+	// -- waits for in-flight requests to finish,
+	// -- closes the server gracefully (within the timeout).
+	// If something goes wrong (e.g., it can't shut down in time), err is non-nil:
+	// -- logs an error saying graceful shutdown failed and it had to force close.
+
+	log.Info().Msg("API Server exited")
+
+}
+
+// HealthCheck Handler function
+// This endpoint is to be used by:
+// - Load balancers to determine if instance is healthy
+// - Kubernetes for liveness and readiness probes
+// - Monitoring systems for uptime checks
+func healthCheckHandler(c *config.Config) echo.HandlerFunc {
+	return func(e echo.Context) error {
+		// TODO : we need to add health check for
+		// - Database connection
+		// - Redis connection if we are using it
+		// - NATS connection
+
+		// For now we are just returning OK, since most of the service dependencies are not implemented yet.
+		response := map[string]interface{}{
+			"status":      "healthy",
+			"version":     "0.6.2",
+			"environment": c.Server.Environment,
+			"timestamp":   time.Now().UTC(),
+			"checks": map[string]string{
+				"server":   "ok",
+				"database": "not setup",
+				"redis":    "not setup",
+				"nats":     "not setup",
+			},
+			"uptime": 999999999999, // logic yet to be implemented
+		}
+		return e.JSON(http.StatusOK, response)
+	}
+}
+
+// CustomHTTPErrorHandler
+// Used to provide a consistent error response
+// Converts Echo errors to a JSON format that can be parsed by the frontend
+func customHTTPErrorHandler(err error, c echo.Context) {
+
+	// Setting default values
+	code := http.StatusInternalServerError
+	message := "internal server error"
+
+	// Checking if the error is an Echo error
+	// We do this through a type assertion err.(*echo.HTTPError) [ basetype.(type to assert to)]
+	if he, ok := err.(*echo.HTTPError); ok {
+		code = he.Code
+		// Then we check the message with another type assertion
+		if msg, ok := he.Message.(string); ok {
+			message = msg
+		}
+	}
+
+	log.Debug().
+		Err(err).
+		Int("status", code).
+		Str("method", c.Request().Method).
+		Str("path", c.Request().URL.Path).
+		Msg("HTTP error")
+
+	if code >= 500 {
+		log.Error().
+			Err(err).
+			Int("status", code).
+			Str("method", c.Request().Method).
+			Str("path", c.Request().URL.Path).
+ Msg("HTTP error") + } + + // If the response is not already written + // Then we don't have to again log + if !c.Response().Committed { + c.JSON(code, map[string]interface{}{ + "error": map[string]interface{}{ + "code": code, + "message": message, + "timestamp": time.Now().UTC(), + "path": c.Request().URL.Path, + "request_id": c.Response().Header().Get(echo.HeaderXRequestID), + }, + }) + } + +} diff --git a/backend/database/migrations/000001_initial_schema.down.sql b/backend/database/migrations/000001_initial_schema.down.sql new file mode 100644 index 0000000..13552c2 --- /dev/null +++ b/backend/database/migrations/000001_initial_schema.down.sql @@ -0,0 +1,33 @@ +-- ============================================================================= +-- ROLLBACK: 000001_initial_schema +-- ============================================================================= + +-- Drop tables in reverse order (respecting foreign keys) +DROP TABLE IF EXISTS notifications CASCADE; +DROP TABLE IF EXISTS analytics_events CASCADE; +DROP TABLE IF EXISTS audit_logs CASCADE; +DROP TABLE IF EXISTS attachments CASCADE; +DROP TABLE IF EXISTS comments CASCADE; +DROP TABLE IF EXISTS milestones CASCADE; +DROP TABLE IF EXISTS deliverables CASCADE; +DROP TABLE IF EXISTS contracts CASCADE; +DROP TABLE IF EXISTS users CASCADE; +DROP TABLE IF EXISTS tenants CASCADE; + +-- Drop functions +DROP FUNCTION IF EXISTS update_updated_at_column() CASCADE; +DROP FUNCTION IF EXISTS set_tenant_context(UUID) CASCADE; +DROP FUNCTION IF EXISTS get_current_tenant() CASCADE; + +-- Drop enums +DROP TYPE IF EXISTS milestone_status CASCADE; +DROP TYPE IF EXISTS milestone_type CASCADE; +DROP TYPE IF EXISTS deliverable_status CASCADE; +DROP TYPE IF EXISTS contract_status CASCADE; +DROP TYPE IF EXISTS tenant_type CASCADE; +DROP TYPE IF EXISTS user_role CASCADE; + +-- Drop extensions (optional - might be used by other databases) +-- DROP EXTENSION IF EXISTS "btree_gin"; +-- DROP EXTENSION IF EXISTS "pg_trgm"; +-- DROP EXTENSION IF EXISTS "uuid-ossp"; \ No newline at end of file diff --git a/backend/database/migrations/000001_initial_schema.up.sql b/backend/database/migrations/000001_initial_schema.up.sql new file mode 100644 index 0000000..80c4671 --- /dev/null +++ b/backend/database/migrations/000001_initial_schema.up.sql @@ -0,0 +1,537 @@ +-- ============================================================================= +-- AURGANIZE V6.2 - INITIAL SCHEMA (CORRECTED) +-- ============================================================================= +-- Migration: 000001_initial_schema +-- Description: Creates core tables for multi-tenant project management +-- Author: Aurganize Team +-- Date: 2025-11-26 +-- ============================================================================= + +-- ============================================================================= +-- EXTENSIONS +-- ============================================================================= + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; +CREATE EXTENSION IF NOT EXISTS "pg_trgm"; +CREATE EXTENSION IF NOT EXISTS "btree_gin"; + +-- ============================================================================= +-- ENUMS +-- ============================================================================= + +CREATE TYPE user_role AS ENUM ('admin', 'vendor', 'consumer', 'project_manager'); +CREATE TYPE tenant_type AS ENUM ('permanent', 'temporary'); +CREATE TYPE contract_status AS ENUM ('draft', 'active', 'completed', 'cancelled'); +CREATE TYPE deliverable_status AS ENUM ('pending', 
'in_progress', 'submitted', 'approved', 'rejected'); +CREATE TYPE milestone_type AS ENUM ('fixed_date', 'duration_from_start', 'duration_from_previous'); +CREATE TYPE milestone_status AS ENUM ('pending', 'in_progress', 'completed'); + +-- ============================================================================= +-- CORE TABLES +-- ============================================================================= + +-- ----------------------------------------------------------------------------- +-- Tenants Table +-- ----------------------------------------------------------------------------- +-- Stores tenant (organization) information +-- Supports both permanent tenants (companies) and temporary tenants (projects) + +CREATE TABLE tenants ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + name VARCHAR(255) NOT NULL, + type tenant_type NOT NULL DEFAULT 'permanent', + parent_tenant_id UUID REFERENCES tenants(id) ON DELETE CASCADE, + is_active BOOLEAN NOT NULL DEFAULT true, + expires_at TIMESTAMPTZ, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT valid_tenant_type CHECK ( + (type = 'permanent' AND parent_tenant_id IS NULL AND expires_at IS NULL) OR + (type = 'temporary' AND parent_tenant_id IS NOT NULL AND expires_at IS NOT NULL) + ) +); + +-- Indexes +CREATE INDEX idx_tenants_parent ON tenants(parent_tenant_id) WHERE parent_tenant_id IS NOT NULL; +CREATE INDEX idx_tenants_active ON tenants(is_active) WHERE is_active = true; +CREATE INDEX idx_tenants_expires ON tenants(expires_at) WHERE expires_at IS NOT NULL; + +-- Comments +COMMENT ON TABLE tenants IS 'Organizations and project workspaces'; +COMMENT ON COLUMN tenants.type IS 'permanent: Long-lived organization, temporary: Project-specific workspace'; +COMMENT ON COLUMN tenants.parent_tenant_id IS 'For temporary tenants, links to parent permanent tenant'; + +-- ----------------------------------------------------------------------------- +-- Users Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE users ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + + -- Authentication + email VARCHAR(255) NOT NULL, + password_hash TEXT NOT NULL, + + -- Profile + name VARCHAR(255) NOT NULL, + avatar_url TEXT, + role user_role NOT NULL DEFAULT 'consumer', + + -- Status + is_active BOOLEAN NOT NULL DEFAULT true, + email_verified_at TIMESTAMPTZ, + last_login_at TIMESTAMPTZ, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT unique_email_per_tenant UNIQUE(tenant_id, email), + CONSTRAINT valid_email CHECK (email ~* '^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$') +); + +-- Indexes +CREATE INDEX idx_users_tenant ON users(tenant_id); +CREATE INDEX idx_users_email ON users(email); +CREATE INDEX idx_users_role ON users(role); +CREATE INDEX idx_users_active ON users(is_active) WHERE is_active = true; + +-- Comments +COMMENT ON TABLE users IS 'User accounts with multi-tenant support'; +COMMENT ON CONSTRAINT unique_email_per_tenant ON users IS 'Email must be unique within a tenant, but can exist in multiple tenants'; + +-- ----------------------------------------------------------------------------- +-- Contracts Table +-- 
----------------------------------------------------------------------------- + +CREATE TABLE contracts ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + + -- Parties + vendor_id UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + consumer_id UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + + -- Details + title VARCHAR(500) NOT NULL, + description TEXT, + status contract_status NOT NULL DEFAULT 'draft', + + -- Dates + start_date DATE NOT NULL, + end_date DATE NOT NULL, + + -- Financial + total_amount NUMERIC(12,2) NOT NULL DEFAULT 0.00, + currency VARCHAR(3) NOT NULL DEFAULT 'USD', + + -- Version control (optimistic locking) + version INTEGER NOT NULL DEFAULT 1, + + -- Metadata + created_by UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT valid_dates CHECK (end_date > start_date), + CONSTRAINT valid_amount CHECK (total_amount >= 0), + CONSTRAINT different_parties CHECK (vendor_id != consumer_id) +); + +-- Indexes +CREATE INDEX idx_contracts_tenant ON contracts(tenant_id); +CREATE INDEX idx_contracts_vendor ON contracts(vendor_id); +CREATE INDEX idx_contracts_consumer ON contracts(consumer_id); +CREATE INDEX idx_contracts_status ON contracts(status); +CREATE INDEX idx_contracts_dates ON contracts(start_date, end_date); +CREATE INDEX idx_contracts_search ON contracts USING GIN(to_tsvector('english', title || ' ' || COALESCE(description, ''))); + +-- Comments +COMMENT ON TABLE contracts IS 'Agreements between vendors and consumers'; +COMMENT ON COLUMN contracts.version IS 'For optimistic locking - increment on each update'; + +-- ----------------------------------------------------------------------------- +-- Deliverables Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE deliverables ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + contract_id UUID NOT NULL REFERENCES contracts(id) ON DELETE CASCADE, + + -- Details + title VARCHAR(500) NOT NULL, + description TEXT, + sequence_number INTEGER NOT NULL, + status deliverable_status NOT NULL DEFAULT 'pending', + + -- Dates + deadline DATE NOT NULL, + submitted_at TIMESTAMPTZ, + approved_at TIMESTAMPTZ, + + -- Submission + submitted_by UUID REFERENCES users(id) ON DELETE SET NULL, + approved_by UUID REFERENCES users(id) ON DELETE SET NULL, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT unique_sequence_per_contract UNIQUE(contract_id, sequence_number), + CONSTRAINT valid_sequence CHECK (sequence_number > 0) +); + +-- Indexes +CREATE INDEX idx_deliverables_tenant ON deliverables(tenant_id); +CREATE INDEX idx_deliverables_contract ON deliverables(contract_id); +CREATE INDEX idx_deliverables_status ON deliverables(status); +CREATE INDEX idx_deliverables_deadline ON deliverables(deadline); + +-- Comments +COMMENT ON TABLE deliverables IS 'Work items to be delivered as part of contracts'; +COMMENT ON COLUMN deliverables.sequence_number IS 'Order of deliverable in contract (1, 2, 3...)'; + +-- ----------------------------------------------------------------------------- +-- Milestones Table +-- 
----------------------------------------------------------------------------- + +CREATE TABLE milestones ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + deliverable_id UUID NOT NULL REFERENCES deliverables(id) ON DELETE CASCADE, + + -- Details + title VARCHAR(500) NOT NULL, + type milestone_type NOT NULL, + condition_value VARCHAR(100) NOT NULL, + amount NUMERIC(12,2) NOT NULL DEFAULT 0.00, + status milestone_status NOT NULL DEFAULT 'pending', + + -- Tracking + completed_at TIMESTAMPTZ, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT valid_amount CHECK (amount >= 0) +); + +-- Indexes +CREATE INDEX idx_milestones_tenant ON milestones(tenant_id); +CREATE INDEX idx_milestones_deliverable ON milestones(deliverable_id); +CREATE INDEX idx_milestones_status ON milestones(status); + +-- Comments +COMMENT ON TABLE milestones IS 'Payment milestones within deliverables'; +COMMENT ON COLUMN milestones.type IS 'Determines how condition_value is interpreted'; +COMMENT ON COLUMN milestones.condition_value IS 'Date or duration depending on type'; + +-- ----------------------------------------------------------------------------- +-- Comments Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE comments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + + -- Polymorphic relation + entity_type VARCHAR(50) NOT NULL, + entity_id UUID NOT NULL, + + -- Content + content TEXT NOT NULL, + + -- Author + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT valid_entity_type CHECK (entity_type IN ('contract', 'deliverable', 'milestone')) +); + +-- Indexes +CREATE INDEX idx_comments_tenant ON comments(tenant_id); +CREATE INDEX idx_comments_entity ON comments(entity_type, entity_id); +CREATE INDEX idx_comments_user ON comments(user_id); +CREATE INDEX idx_comments_created ON comments(created_at DESC); + +-- Comments +COMMENT ON TABLE comments IS 'Discussion comments on various entities'; +COMMENT ON COLUMN comments.entity_type IS 'Type of entity: contract, deliverable, milestone'; +COMMENT ON COLUMN comments.entity_id IS 'ID of the entity (contract, deliverable, or milestone)'; + +-- ----------------------------------------------------------------------------- +-- Attachments Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE attachments ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + + -- Polymorphic relation + entity_type VARCHAR(50) NOT NULL, + entity_id UUID NOT NULL, + + -- File details + filename VARCHAR(255) NOT NULL, + content_type VARCHAR(100) NOT NULL, + size BIGINT NOT NULL, + object_name TEXT NOT NULL, + + -- Status + status VARCHAR(20) NOT NULL DEFAULT 'pending', + + -- Tracking + uploaded_by UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + uploaded_at TIMESTAMPTZ, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ, + + -- Constraints + CONSTRAINT valid_size CHECK (size > 0), + CONSTRAINT 
valid_status CHECK (status IN ('pending', 'uploaded', 'processing', 'failed')), + CONSTRAINT valid_entity_type CHECK (entity_type IN ('contract', 'deliverable', 'milestone', 'comment')) +); + +-- Indexes +CREATE INDEX idx_attachments_tenant ON attachments(tenant_id); +CREATE INDEX idx_attachments_entity ON attachments(entity_type, entity_id); +CREATE INDEX idx_attachments_uploaded_by ON attachments(uploaded_by); +CREATE INDEX idx_attachments_status ON attachments(status); + +-- Comments +COMMENT ON TABLE attachments IS 'File attachments for various entities'; +COMMENT ON COLUMN attachments.object_name IS 'Object key in MinIO/S3'; + +-- ============================================================================= +-- AUDIT TABLES +-- ============================================================================= + +-- ----------------------------------------------------------------------------- +-- Audit Logs Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE audit_logs ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE, + + -- Action details + action VARCHAR(100) NOT NULL, + entity_type VARCHAR(50) NOT NULL, + entity_id UUID NOT NULL, + + -- Actor + actor_id UUID NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + + -- Changes + old_values JSONB, + new_values JSONB, + + -- Context + ip_address INET, + user_agent TEXT, + + -- Timestamp + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Indexes +CREATE INDEX idx_audit_tenant ON audit_logs(tenant_id); +CREATE INDEX idx_audit_entity ON audit_logs(entity_type, entity_id); +CREATE INDEX idx_audit_actor ON audit_logs(actor_id); +CREATE INDEX idx_audit_action ON audit_logs(action); +CREATE INDEX idx_audit_created ON audit_logs(created_at DESC); +CREATE INDEX idx_audit_values ON audit_logs USING GIN(old_values, new_values); + +-- Comments +COMMENT ON TABLE audit_logs IS 'Audit trail of all important actions'; +COMMENT ON COLUMN audit_logs.action IS 'e.g., contract.created, deliverable.submitted'; + +-- ----------------------------------------------------------------------------- +-- Analytics Events Table +-- ----------------------------------------------------------------------------- + +CREATE TABLE analytics_events ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID REFERENCES tenants(id) ON DELETE CASCADE, + + -- Event details + event_type VARCHAR(100) NOT NULL, + event_data JSONB NOT NULL DEFAULT '{}', + + -- User (nullable for anonymous events) + user_id UUID REFERENCES users(id) ON DELETE SET NULL, + + -- Context + ip_address INET, + user_agent TEXT, + + -- Timestamp + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Indexes +CREATE INDEX idx_analytics_tenant ON analytics_events(tenant_id); +CREATE INDEX idx_analytics_type ON analytics_events(event_type); +CREATE INDEX idx_analytics_user ON analytics_events(user_id); +CREATE INDEX idx_analytics_created ON analytics_events(created_at DESC); +CREATE INDEX idx_analytics_data ON analytics_events USING GIN(event_data); + +-- Comments +COMMENT ON TABLE analytics_events IS 'User behavior and system events for analytics'; + +-- ============================================================================= +-- NOTIFICATION TABLES +-- ============================================================================= + +CREATE TABLE notifications ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE 
CASCADE, + + -- Recipient + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + + -- Content + type VARCHAR(50) NOT NULL, + title VARCHAR(255) NOT NULL, + message TEXT NOT NULL, + + -- Related entity + entity_type VARCHAR(50), + entity_id UUID, + + -- Status + read_at TIMESTAMPTZ, + + -- Metadata + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Indexes +CREATE INDEX idx_notifications_tenant ON notifications(tenant_id); +CREATE INDEX idx_notifications_user ON notifications(user_id); +CREATE INDEX idx_notifications_unread ON notifications(user_id, read_at) WHERE read_at IS NULL; +CREATE INDEX idx_notifications_entity ON notifications(entity_type, entity_id); +CREATE INDEX idx_notifications_created ON notifications(created_at DESC); + +-- Comments +COMMENT ON TABLE notifications IS 'In-app notifications for users'; + +-- ============================================================================= +-- TRIGGERS FOR UPDATED_AT +-- ============================================================================= + +-- Function to update updated_at timestamp +CREATE OR REPLACE FUNCTION update_updated_at_column() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Apply to all tables with updated_at +CREATE TRIGGER update_tenants_updated_at BEFORE UPDATE ON tenants + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_contracts_updated_at BEFORE UPDATE ON contracts + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_deliverables_updated_at BEFORE UPDATE ON deliverables + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_milestones_updated_at BEFORE UPDATE ON milestones + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_comments_updated_at BEFORE UPDATE ON comments + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +CREATE TRIGGER update_attachments_updated_at BEFORE UPDATE ON attachments + FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); + +-- ============================================================================= +-- ROW-LEVEL SECURITY +-- ============================================================================= + +-- Enable RLS on all tenant-scoped tables +ALTER TABLE tenants ENABLE ROW LEVEL SECURITY; +ALTER TABLE users ENABLE ROW LEVEL SECURITY; +ALTER TABLE contracts ENABLE ROW LEVEL SECURITY; +ALTER TABLE deliverables ENABLE ROW LEVEL SECURITY; +ALTER TABLE milestones ENABLE ROW LEVEL SECURITY; +ALTER TABLE comments ENABLE ROW LEVEL SECURITY; +ALTER TABLE attachments ENABLE ROW LEVEL SECURITY; +ALTER TABLE audit_logs ENABLE ROW LEVEL SECURITY; +ALTER TABLE analytics_events ENABLE ROW LEVEL SECURITY; +ALTER TABLE notifications ENABLE ROW LEVEL SECURITY; + +-- Create policies +CREATE POLICY tenants_tenant_isolation ON tenants + USING (id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY users_tenant_isolation ON users + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY contracts_tenant_isolation ON contracts + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = 
current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY deliverables_tenant_isolation ON deliverables + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY milestones_tenant_isolation ON milestones + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY comments_tenant_isolation ON comments + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY attachments_tenant_isolation ON attachments + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY audit_logs_tenant_isolation ON audit_logs + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +CREATE POLICY analytics_events_tenant_isolation ON analytics_events + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID OR tenant_id IS NULL) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID OR tenant_id IS NULL); + +CREATE POLICY notifications_tenant_isolation ON notifications + USING (tenant_id = current_setting('app.current_tenant_id', true)::UUID) + WITH CHECK (tenant_id = current_setting('app.current_tenant_id', true)::UUID); + +-- ============================================================================= +-- END OF MIGRATION +-- ============================================================================= \ No newline at end of file diff --git a/backend/database/scripts/backup_db.sh b/backend/database/scripts/backup_db.sh new file mode 100644 index 0000000..62b53f5 --- /dev/null +++ b/backend/database/scripts/backup_db.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# database/scripts/backup.sh +set -e + +# ============================================================================ +# CONFIGURATION +# ============================================================================ + +CONTAINER_NAME="${CONTAINER_NAME:-aurganize-postgres}" + +# Host directory (on your laptop / machine) +BACKUP_DIR="${BACKUP_DIR:-$HOME/workspace/db_backup/aurganize_backups}" + +DB_NAME="${DB_NAME:-aurganize_dev}" +DB_USER="${DB_USER:-aurganize_backend_api}" +RETENTION_DAYS="${RETENTION_DAYS:-30}" + +# Ensure host backup directory exists +mkdir -p "${BACKUP_DIR}" + +# Timestamped filename on host +TIMESTAMP=$(date +"%Y%m%d_%H%M%S") +BACKUP_FILE="${BACKUP_DIR}/${DB_NAME}_${TIMESTAMP}.sql.gz" + +echo "===================================================================" +echo "PostgreSQL Backup (Docker) - $(date)" +echo "===================================================================" +echo "Container: ${CONTAINER_NAME}" +echo "Database : ${DB_NAME}" +echo "Backup : ${BACKUP_FILE}" +echo "" + +# ============================================================================ +# BACKUP EXECUTION +# ============================================================================ + +echo "Creating backup inside Docker container..." 
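+# NOTE (suggested hardening, not in the original script): with plain `set -e`, a pg_dump
+# failure inside the pipeline below is hidden by gzip's exit status, so the script would
+# carry on and the size check further down would "verify" an empty or truncated dump.
+# Adding `set -o pipefail` next to `set -e` at the top of the script makes the whole
+# pipeline fail as soon as pg_dump fails.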
+ +docker exec -i "${CONTAINER_NAME}" \ + pg_dump -U "${DB_USER}" -d "${DB_NAME}" --format=plain --no-owner --no-privileges \ + | gzip > "${BACKUP_FILE}" + +# ============================================================================ +# VERIFY BACKUP +# ============================================================================ + +if [ -f "${BACKUP_FILE}" ]; then + SIZE=$(du -h "${BACKUP_FILE}" | cut -f1) + echo "✓ Backup created successfully (${SIZE})" +else + echo "✗ Backup failed!" + exit 1 +fi + +# ============================================================================ +# CLEANUP OLD BACKUPS +# ============================================================================ + +echo "" +echo "Cleaning up old backups (older than ${RETENTION_DAYS} days)..." + +find "${BACKUP_DIR}" -name "${DB_NAME}_*.sql.gz" -type f -mtime +${RETENTION_DAYS} -delete + +# ============================================================================ +# LIST BACKUPS +# ============================================================================ + +echo "" +echo "Available backups:" +ls -lh "${BACKUP_DIR}/${DB_NAME}"_*.sql.gz 2>/dev/null || echo "No backups found." + +echo "" +echo "===================================================================" +echo "Backup Complete!" +echo "===================================================================" diff --git a/backend/database/scripts/configure-postgres.sh b/backend/database/scripts/configure-postgres.sh new file mode 100644 index 0000000..fb03ccd --- /dev/null +++ b/backend/database/scripts/configure-postgres.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# database/scripts/configure-postgres.sh +# +# PostgreSQL Configuration Management Script +# Applies recommended settings based on system resources +# NOTE THIS FILE IS FOR REFERENCE PURPOSE ONLY SINCE WE ARE USING INSIDE A DOCKER CONTAINER +# - WE HAVE FIND ANOTHER WAY TO PROVISION THIS SHELL SCRIPT +# -- DO NOT USE +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo "===================================================================" +echo "PostgreSQL Configuration Script for Aurganize V6.2" +echo "===================================================================" + +# Check if running as root +if [[ $EUID -ne 0 ]]; then + echo -e "${RED}This script must be run as root${NC}" + exit 1 +fi + +# Detect PostgreSQL version +PG_VERSION=$(sudo -u postgres psql -t -c "SELECT version();" | grep -oP '\d+(?:\.\d+)?' | head -1 | cut -d. -f1) +echo -e "${GREEN}Detected PostgreSQL version: ${PG_VERSION}${NC}" + +# Detect total RAM +TOTAL_RAM_KB=$(grep MemTotal /proc/meminfo | awk '{print $2}') +TOTAL_RAM_GB=$((TOTAL_RAM_KB / 1024 / 1024)) +echo -e "${GREEN}Detected RAM: ${TOTAL_RAM_GB}GB${NC}" + +# Calculate settings +SHARED_BUFFERS_GB=$((TOTAL_RAM_GB / 4)) +EFFECTIVE_CACHE_GB=$((TOTAL_RAM_GB * 3 / 4)) +WORK_MEM_MB=$(((TOTAL_RAM_GB - SHARED_BUFFERS_GB) * 1024 / 100 / 3)) + +echo "" +echo "Recommended settings:" +echo " shared_buffers = ${SHARED_BUFFERS_GB}GB" +echo " effective_cache_size = ${EFFECTIVE_CACHE_GB}GB" +echo " work_mem = ${WORK_MEM_MB}MB" +echo "" + +# Ask for confirmation +read -p "Apply these settings? (y/n) " -n 1 -r +echo +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Aborted." 
+ exit 0 +fi + +# Backup current config +CONFIG_FILE="/etc/postgresql/${PG_VERSION}/main/postgresql.conf" +BACKUP_FILE="${CONFIG_FILE}.backup.$(date +%Y%m%d_%H%M%S)" + +echo -e "${YELLOW}Backing up config to: ${BACKUP_FILE}${NC}" +cp "${CONFIG_FILE}" "${BACKUP_FILE}" + +# Apply settings +echo -e "${GREEN}Applying settings...${NC}" + +sudo -u postgres psql -c "ALTER SYSTEM SET shared_buffers = '${SHARED_BUFFERS_GB}GB';" +sudo -u postgres psql -c "ALTER SYSTEM SET effective_cache_size = '${EFFECTIVE_CACHE_GB}GB';" +sudo -u postgres psql -c "ALTER SYSTEM SET work_mem = '${WORK_MEM_MB}MB';" +sudo -u postgres psql -c "ALTER SYSTEM SET maintenance_work_mem = '256MB';" +sudo -u postgres psql -c "ALTER SYSTEM SET random_page_cost = '1.1';" +sudo -u postgres psql -c "ALTER SYSTEM SET effective_io_concurrency = '200';" + +# Restart PostgreSQL +echo -e "${YELLOW}Restarting PostgreSQL...${NC}" +systemctl restart postgresql + +# Verify +echo -e "${GREEN}Configuration applied successfully!${NC}" +sudo -u postgres psql -c "SELECT name, setting, unit FROM pg_settings WHERE name IN ('shared_buffers', 'effective_cache_size', 'work_mem');" + +echo "" +echo "===================================================================" +echo "Configuration complete!" +echo "Backup saved to: ${BACKUP_FILE}" +echo "===================================================================" \ No newline at end of file diff --git a/backend/database/scripts/dev_seed.sh b/backend/database/scripts/dev_seed.sh new file mode 100644 index 0000000..c5b34f7 --- /dev/null +++ b/backend/database/scripts/dev_seed.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +CONTAINER="aurganize-postgres" +DB_USER="postgres" +DB_NAME="aurganize_dev" +SEED_FILE="$(dirname "$0")/../seeds/001_dev_data.sql" + +echo "Running development seeds..." + +docker exec -i "$CONTAINER" \ + psql -U "$DB_USER" -d "$DB_NAME" < "$SEED_FILE" + +echo "✓ Development seed data loaded successfully" diff --git a/backend/database/scripts/health_check.sh b/backend/database/scripts/health_check.sh new file mode 100644 index 0000000..05855c2 --- /dev/null +++ b/backend/database/scripts/health_check.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# database/scripts/health_check.sh + +set -e + +CONTAINER_NAME="${CONTAINER_NAME:-aurganize-postgres}" +DB_USER="${DB_USER:-postgres}" + +# Updated path to match your new location +SQL_FILE="$(dirname "$0")/../tests/health_check.sql" + +echo "===================================================================" +echo "Running PostgreSQL Health Check (Docker)" +echo "Container : ${CONTAINER_NAME}" +echo "SQL File : ${SQL_FILE}" +echo "===================================================================" +echo "" + +# Ensure SQL file exists +if [ ! -f "$SQL_FILE" ]; then + echo "❌ SQL file not found: $SQL_FILE" + exit 1 +fi + +docker exec -i "${CONTAINER_NAME}" \ + psql -U "${DB_USER}" -f - <" + exit 1 + fi + echo "⚠️ Forcing version to $2..." 
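+        # `migrate force` only rewrites the version recorded in schema_migrations and
+        # clears the dirty flag; it does not run any up/down SQL, which is why it should
+        # only be used to recover from a known-bad state.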
+ migrate -path "${MIGRATIONS_DIR}" -database "${DATABASE_URL}" force "$2" + echo -e "${GREEN}✓ Version forced${NC}" + ;; + + *) + echo "Usage: $0 {up|down|version|force }" + echo "" + echo "Commands:" + echo " up Apply all pending migrations" + echo " down Rollback the last migration" + echo " version Show current migration version" + echo " force Force database to specific version (use with caution!)" + exit 1 + ;; +esac \ No newline at end of file diff --git a/backend/database/scripts/restore_db.sh b/backend/database/scripts/restore_db.sh new file mode 100644 index 0000000..83465d9 --- /dev/null +++ b/backend/database/scripts/restore_db.sh @@ -0,0 +1,74 @@ +#!/bin/bash +# database/scripts/restore.sh + +set -e + +# ============================================================================ +# CONFIGURATION +# ============================================================================ + +CONTAINER_NAME="${CONTAINER_NAME:-aurganize-postgres}" + +# Same path as backup.sh (HOST system) +BACKUP_DIR="${BACKUP_DIR:-$HOME/workspace/db_backup/aurganize_backups}" + +DB_NAME="${DB_NAME:-aurganize_dev}" +DB_USER="${DB_USER:-aurganize_backend_api}" + +# ============================================================================ +# CHECK ARGUMENTS +# ============================================================================ + +if [ -z "$1" ]; then + echo "Usage: $0 " + echo "" + echo "Available backups:" + ls -lh "${BACKUP_DIR}/${DB_NAME}"_*.sql.gz 2>/dev/null || echo "No backups found" + exit 1 +fi + +BACKUP_FILE="$1" + +if [ ! -f "$BACKUP_FILE" ]; then + echo "❌ Backup file not found: $BACKUP_FILE" + exit 1 +fi + +echo "===================================================================" +echo "PostgreSQL Restore (Docker)" +echo "===================================================================" +echo "⚠️ WARNING: This will ERASE all data in database \"${DB_NAME}\"!" +echo "Backup file: ${BACKUP_FILE}" +echo "" +read -p "Type 'yes' to continue: " CONFIRM + +if [ "$CONFIRM" != "yes" ]; then + echo "Aborted." + exit 0 +fi + +echo "" +echo "Dropping & recreating database inside Docker container..." + +# ============================================================================ +# DROP + CREATE DATABASE INSIDE DOCKER +# ============================================================================ + +docker exec -i "${CONTAINER_NAME}" psql -U postgres -c "DROP DATABASE IF EXISTS ${DB_NAME};" +docker exec -i "${CONTAINER_NAME}" psql -U postgres -c "CREATE DATABASE ${DB_NAME} OWNER ${DB_USER};" + +echo "✓ Database recreated" +echo "" + +# ============================================================================ +# RESTORE DATABASE +# ============================================================================ + +echo "Restoring backup into Docker container..." + +gunzip < "${BACKUP_FILE}" | docker exec -i "${CONTAINER_NAME}" psql -U "${DB_USER}" -d "${DB_NAME}" + +echo "" +echo "===================================================================" +echo "Restore Complete!" +echo "===================================================================" diff --git a/backend/database/scripts/test_ops.sh b/backend/database/scripts/test_ops.sh new file mode 100644 index 0000000..4e75125 --- /dev/null +++ b/backend/database/scripts/test_ops.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -e + +CONTAINER="aurganize-postgres" +DB_USER="aurganize_backend_api" +DB_NAME="aurganize_dev" +SEED_FILE="$(dirname "$0")/../tests/test_operations.sql" + +echo "Running development seeds..." 
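+# NOTE (suggested, not in the original script): psql exits 0 even when individual SQL
+# statements fail, so `set -e` will not make this script fail on a broken test. Passing
+# -v ON_ERROR_STOP=1 to psql in the command below, e.g.
+#
+#   psql -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1
+#
+# aborts on the first SQL error and propagates a non-zero exit code.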
+ +docker exec -i "$CONTAINER" \ + psql -U "$DB_USER" -d "$DB_NAME" < "$SEED_FILE" + +echo "✓ Development seed data loaded successfully" diff --git a/backend/database/seeds/001_dev_data.sql b/backend/database/seeds/001_dev_data.sql new file mode 100644 index 0000000..dba5ea9 --- /dev/null +++ b/backend/database/seeds/001_dev_data.sql @@ -0,0 +1,224 @@ +-- ============================================================================= +-- DEVELOPMENT SEED DATA +-- ============================================================================= +-- WARNING: DO NOT RUN IN PRODUCTION! +-- ============================================================================= +-- ============================================================================= +-- Temporarily disable RLS inside the seed script +-- ============================================================================= +ALTER TABLE tenants DISABLE ROW LEVEL SECURITY; +ALTER TABLE users DISABLE ROW LEVEL SECURITY; +ALTER TABLE contracts DISABLE ROW LEVEL SECURITY; +ALTER TABLE deliverables DISABLE ROW LEVEL SECURITY; +ALTER TABLE milestones DISABLE ROW LEVEL SECURITY; +ALTER TABLE comments DISABLE ROW LEVEL SECURITY; +ALTER TABLE attachments DISABLE ROW LEVEL SECURITY; +ALTER TABLE audit_logs DISABLE ROW LEVEL SECURITY; +ALTER TABLE analytics_events DISABLE ROW LEVEL SECURITY; +ALTER TABLE notifications DISABLE ROW LEVEL SECURITY; +-- ============================================================================= + +BEGIN; + +-- ============================================================================= +-- Create test tenants +-- ============================================================================= + +-- Permanent tenant 1: ACME Corporation +INSERT INTO tenants (id, name, type) VALUES + ('10000000-0000-0000-0000-000000000001', 'ACME Corporation', 'permanent'); + +-- Permanent tenant 2: TechStart Inc +INSERT INTO tenants (id, name, type) VALUES + ('20000000-0000-0000-0000-000000000002', 'TechStart Inc', 'permanent'); + +-- Temporary tenant (project workspace) +INSERT INTO tenants (id, name, type, parent_tenant_id, expires_at) VALUES + ('30000000-0000-0000-0000-000000000003', + 'ACME-Website-Project', + 'temporary', + '10000000-0000-0000-0000-000000000001', + NOW() + INTERVAL '6 months'); + +-- ============================================================================= +-- Create test users +-- ============================================================================= + +-- ACME Corporation users +-- Password for all: "password123" (hashed with bcrypt cost 12) +INSERT INTO users (id, tenant_id, email, password_hash, name, role) VALUES + -- Admin + ('11000000-0000-0000-0000-000000000001', + '10000000-0000-0000-0000-000000000001', + 'admin@acme.com', + '$2a$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYKU.PN5Hyu', + 'Alice Admin', + 'admin'), + + -- Vendor + ('11000000-0000-0000-0000-000000000002', + '10000000-0000-0000-0000-000000000001', + 'vendor@acme.com', + '$2a$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYKU.PN5Hyu', + 'Bob Vendor', + 'vendor'), + + -- Consumer + ('11000000-0000-0000-0000-000000000003', + '10000000-0000-0000-0000-000000000001', + 'consumer@acme.com', + '$2a$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYKU.PN5Hyu', + 'Charlie Consumer', + 'consumer'); + +-- TechStart Inc users +INSERT INTO users (id, tenant_id, email, password_hash, name, role) VALUES + ('21000000-0000-0000-0000-000000000001', + '20000000-0000-0000-0000-000000000002', + 'admin@techstart.com', + 
'$2a$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewY5GyYKU.PN5Hyu', + 'Diana Director', + 'admin'); + +-- ============================================================================= +-- Create test contracts +-- ============================================================================= + +INSERT INTO contracts ( + id, tenant_id, vendor_id, consumer_id, + title, description, status, + start_date, end_date, total_amount, created_by +) VALUES + -- Active contract + ('c1000000-0000-0000-0000-000000000001', + '10000000-0000-0000-0000-000000000001', + '11000000-0000-0000-0000-000000000002', + '11000000-0000-0000-0000-000000000003', + 'Website Redesign Project', + 'Complete overhaul of company website with modern design and responsive layout.', + 'active', + CURRENT_DATE - INTERVAL '1 month', + CURRENT_DATE + INTERVAL '5 months', + 50000.00, + '11000000-0000-0000-0000-000000000003'), + + -- Draft contract + ('c2000000-0000-0000-0000-000000000002', + '10000000-0000-0000-0000-000000000001', + '11000000-0000-0000-0000-000000000002', + '11000000-0000-0000-0000-000000000003', + 'Mobile App Development', + 'Native mobile application for iOS and Android platforms.', + 'draft', + CURRENT_DATE + INTERVAL '1 month', + CURRENT_DATE + INTERVAL '7 months', + 75000.00, + '11000000-0000-0000-0000-000000000003'); + +-- ============================================================================= +-- Create test deliverables +-- ============================================================================= + +INSERT INTO deliverables ( + id, tenant_id, contract_id, + title, description, sequence_number, status, deadline +) VALUES + -- Website project deliverables + ('d1000000-0000-0000-0000-000000000001', + '10000000-0000-0000-0000-000000000001', + 'c1000000-0000-0000-0000-000000000001', + 'Homepage Design', + 'Design mockups for the homepage including desktop and mobile views.', + 1, + 'approved', + CURRENT_DATE - INTERVAL '2 weeks'), + + ('d2000000-0000-0000-0000-000000000002', + '10000000-0000-0000-0000-000000000001', + 'c1000000-0000-0000-0000-000000000001', + 'Backend API Development', + 'RESTful API for content management and user authentication.', + 2, + 'in_progress', + CURRENT_DATE + INTERVAL '1 month'), + + ('d3000000-0000-0000-0000-000000000003', + '10000000-0000-0000-0000-000000000001', + 'c1000000-0000-0000-0000-000000000001', + 'Frontend Implementation', + 'React-based frontend with responsive design.', + 3, + 'pending', + CURRENT_DATE + INTERVAL '2 months'); + +-- ============================================================================= +-- Create test milestones +-- ============================================================================= + +INSERT INTO milestones ( + id, tenant_id, deliverable_id, + title, type, condition_value, amount, status +) VALUES + -- Homepage milestones + ('10000000-0000-4000-8000-000000000101', + '10000000-0000-0000-0000-000000000001', + 'd1000000-0000-0000-0000-000000000001', + 'Design approval', + 'fixed_date', + (CURRENT_DATE - INTERVAL '2 weeks')::TEXT, + 5000.00, + 'completed'), + + -- Backend milestones + ('20000000-0000-4000-8000-000000000102', + '10000000-0000-0000-0000-000000000001', + 'd2000000-0000-0000-0000-000000000002', + 'API endpoints complete', + 'duration_from_start', + '30', + 15000.00, + 'in_progress'); + +-- ============================================================================= +-- Create test comments +-- ============================================================================= + +INSERT INTO comments ( + id, tenant_id, 
entity_type, entity_id, + content, user_id +) VALUES + ('30000000-0000-4000-8000-000000000201', + '10000000-0000-0000-0000-000000000001', + 'contract', + 'c1000000-0000-0000-0000-000000000001', + 'Looking forward to starting this project!', + '11000000-0000-0000-0000-000000000003'), + + ('30000000-0000-4000-8000-000000000202', + '10000000-0000-0000-0000-000000000001', + 'deliverable', + 'd1000000-0000-0000-0000-000000000001', + 'Great work on the designs! Approved.', + '11000000-0000-0000-0000-000000000003'); + +COMMIT; + + + + +-- ============================================================================= +-- Re-enable RLS after the seed script +-- ============================================================================= + +ALTER TABLE tenants ENABLE ROW LEVEL SECURITY; +ALTER TABLE users ENABLE ROW LEVEL SECURITY; +ALTER TABLE contracts ENABLE ROW LEVEL SECURITY; +ALTER TABLE deliverables ENABLE ROW LEVEL SECURITY; +ALTER TABLE milestones ENABLE ROW LEVEL SECURITY; +ALTER TABLE comments ENABLE ROW LEVEL SECURITY; +ALTER TABLE attachments ENABLE ROW LEVEL SECURITY; +ALTER TABLE audit_logs ENABLE ROW LEVEL SECURITY; +ALTER TABLE analytics_events ENABLE ROW LEVEL SECURITY; +ALTER TABLE notifications ENABLE ROW LEVEL SECURITY; + +-- ============================================================================= diff --git a/backend/database/tests/health_check.sql b/backend/database/tests/health_check.sql new file mode 100644 index 0000000..f06e38f --- /dev/null +++ b/backend/database/tests/health_check.sql @@ -0,0 +1,70 @@ +-- database/scripts/health_check.sql + +\echo '=================================================================' +\echo 'PostgreSQL Health Check' +\echo '=================================================================' + +-- Database size +\echo '\nDatabase Sizes:' +SELECT + datname as database, + pg_size_pretty(pg_database_size(datname)) as size +FROM pg_database +WHERE datname LIKE 'aurganize%' +ORDER BY pg_database_size(datname) DESC; + +-- Table sizes +\echo '\nTop 10 Largest Tables:' +SELECT + schemaname, + tablename, + pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size +FROM pg_tables +WHERE schemaname = 'public' +ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC +LIMIT 10; + +-- Active connections +\echo '\nActive Connections:' +SELECT + count(*) as connections, + state +FROM pg_stat_activity +WHERE datname LIKE 'aurganize%' +GROUP BY state; + +-- Slow queries (if any) +\echo '\nSlow Queries (> 1 second):' +SELECT + pid, + now() - query_start as duration, + state, + substring(query, 1, 60) as query +FROM pg_stat_activity +WHERE state = 'active' + AND now() - query_start > interval '1 second' +ORDER BY duration DESC; + +-- Cache hit ratio (should be > 99%) +\echo '\nCache Hit Ratio:' +SELECT + sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read)) * 100 + as cache_hit_ratio +FROM pg_statio_user_tables; + +-- Index usage +\echo '\nUnused Indexes:' +SELECT + schemaname, + tablename, + indexname, + idx_scan as index_scans +FROM pg_stat_user_indexes +WHERE idx_scan = 0 + AND indexname NOT LIKE '%_pkey' +ORDER BY pg_relation_size(indexrelid) DESC +LIMIT 10; + +\echo '\n=================================================================' +\echo 'Health Check Complete' +\echo '=================================================================' \ No newline at end of file diff --git a/backend/database/tests/test_operations.sql b/backend/database/tests/test_operations.sql new file mode 100644 index 0000000..5c61c4b --- 
/dev/null +++ b/backend/database/tests/test_operations.sql @@ -0,0 +1,53 @@ +-- ============================================================================= +-- DATABASE OPERATIONS TEST SUITE +-- ============================================================================= + +\echo '=================================================================' +\echo 'Testing Basic Operations' +\echo '=================================================================' + +-- Set tenant context +SELECT set_tenant_context('10000000-0000-0000-0000-000000000001'::UUID); + +-- Test 1: Query contracts (should see only ACME contracts) +\echo '\n[Test 1] Query contracts for tenant...' +SELECT COUNT(*) as contract_count FROM contracts; +-- Expected: 2 + +-- Test 2: Query with joins +\echo '\n[Test 2] Query contracts with vendor/consumer...' +SELECT + c.title, + v.name as vendor_name, + con.name as consumer_name +FROM contracts c +JOIN users v ON c.vendor_id = v.id +JOIN users con ON c.consumer_id = con.id; +-- Expected: 2 rows + +-- Test 3: Full-text search +\echo '\n[Test 3] Search contracts...' +SELECT title +FROM contracts +WHERE to_tsvector('english', title || ' ' || COALESCE(description, '')) + @@ to_tsquery('english', 'website'); +-- Expected: 1 row (Website Redesign) + +-- Test 4: Test RLS isolation +\echo '\n[Test 4] Switch to different tenant...' +SELECT set_tenant_context('20000000-0000-0000-0000-000000000002'::UUID); +SELECT COUNT(*) FROM contracts; +-- Expected: 0 (different tenant) + +-- Test 5: Trigger test (updated_at) +\echo '\n[Test 5] Test updated_at trigger...' +SELECT set_tenant_context('10000000-0000-0000-0000-000000000001'::UUID); +UPDATE contracts +SET description = description || ' (updated)' +WHERE id = 'c1000000-0000-0000-0000-000000000001' +RETURNING title, updated_at > created_at as was_updated; +-- Expected: was_updated = true + +\echo '\n=================================================================' +\echo 'All Tests Complete!' 
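+
+-- Follow-up sketch (not part of the numbered suite above): the WITH CHECK half of the
+-- tenant isolation policy should also reject cross-tenant writes. Assuming the policy from
+-- 04-setup-rls.sql is applied to contracts, an INSERT whose tenant_id does not match the
+-- session context is expected to fail; it is left commented out so the suite still runs
+-- end to end, and the column list and values are illustrative only.
+-- SELECT set_tenant_context('10000000-0000-0000-0000-000000000001'::UUID);
+-- INSERT INTO contracts (id, tenant_id, vendor_id, consumer_id, title, status,
+--                        start_date, end_date, total_amount, created_by)
+-- VALUES (uuid_generate_v4(),
+--         '20000000-0000-0000-0000-000000000002',  -- does NOT match the session tenant
+--         '11000000-0000-0000-0000-000000000002',
+--         '11000000-0000-0000-0000-000000000003',
+--         'Cross-tenant insert (should fail)', 'draft',
+--         CURRENT_DATE, CURRENT_DATE + INTERVAL '1 month', 1000.00,
+--         '11000000-0000-0000-0000-000000000003');
+-- Expected: ERROR: new row violates row-level security policy for table "contracts"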
+\echo '=================================================================' \ No newline at end of file diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 0000000..a2881b4 --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,49 @@ +module github.com/creativenoz/aurganize-v62/backend + +go 1.25.2 + + + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.7.6 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/labstack/echo/v4 v4.13.4 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/nats-io/nats.go v1.47.0 // indirect + github.com/nats-io/nkeys v0.4.11 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/redis/go-redis/v9 v9.17.1 // indirect + github.com/rs/zerolog v1.34.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.11.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gorm.io/driver/postgres v1.6.0 // indirect + gorm.io/gorm v1.31.1 // indirect +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 0000000..cda58ac --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,108 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/gabriel-vasile/mimetype v1.4.10 
h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= +github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg= +github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= +github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA= +github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable 
v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM= +github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= +github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/redis/go-redis/v9 v9.17.1 h1:7tl732FjYPRT9H9aNfyTwKg9iTETjWjGKEJ2t/5iWTs= +github.com/redis/go-redis/v9 v9.17.1/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 
h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4= +gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo= +gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= +gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go new file mode 100644 index 0000000..036a102 --- /dev/null +++ b/backend/internal/config/config.go @@ -0,0 +1,209 @@ +package config + +import ( + "fmt" + "os" + "strconv" + "time" + + "github.com/joho/godotenv" +) + +type Config struct { + Server ServerConfig + Database DatabaseConfig + JWT JWTConfig + Redis RedisConfig + NATS NATSConfig + Storage StorageConfig +} + +// ServerConfig type holds the information about the http server settings +type ServerConfig struct { + Port string // HTTP port to listen on + Environment string // can be development, staging, production + ReadTimeout time.Duration // Max time to read request + WriteTimeout time.Duration // Max time to write response +} + +// DatabaseConfig contains postgresSQL connection settings +type DatabaseConfig struct { + Host string // Database host + Port string // Database port + User string // Database user + Password string // Database password + DBName string // Database name + SSLMode string // SSL mode : disable, require, verify-full ? 
(this selects whether the connection to Postgres is encrypted with TLS)
+	MaxOpenConns    int           // Maximum open connections
+	MaxIdleConns    int           // Maximum idle connections
+	ConnMaxLifetime time.Duration // Maximum connection lifetime
+}
+
+// JWTConfig contains JWT token settings
+type JWTConfig struct {
+	AccessSecret  string        // Secret for access tokens
+	RefreshSecret string        // Secret for refresh tokens
+	AccessExpiry  time.Duration // Access token expiry (15 minutes)
+	RefreshExpiry time.Duration // Refresh token expiry (7 days)
+	Issuer        string        // Token issuer claim
+}
+
+type RedisConfig struct {
+	Host     string // Redis host
+	Port     string // Redis port
+	Password string // Redis password (set to empty if no auth is set)
+	DB       int    // Redis database number
+}
+
+// NATSConfig contains NATS messaging settings
+type NATSConfig struct {
+	URL string // NATS server URL
+}
+
+// StorageConfig contains MinIO (S3-compatible) settings
+type StorageConfig struct {
+	Endpoint        string // MinIO endpoint
+	AccessKeyID     string // Access key
+	SecretAccessKey string // Secret key
+	BucketName      string // Bucket name
+	UseSSL          bool   // Use HTTPS
+}
+
+func Load() (*Config, error) {
+	if os.Getenv("APP_ENV") != "production" {
+		if err := godotenv.Load(); err != nil {
+			fmt.Println("Warning: .env file not found, using environment variables")
+		}
+	}
+
+	cfg := &Config{
+		Server: ServerConfig{
+			Port:         getEnv("SERVER_PORT", "8080"),
+			Environment:  getEnv("APP_ENV", "development"),
+			ReadTimeout:  parseDuration(getEnv("SERVER_READ_TIMEOUT", "10s")),
+			WriteTimeout: parseDuration(getEnv("SERVER_WRITE_TIMEOUT", "10s")),
+		},
+		Database: DatabaseConfig{
+			Host:            getEnv("DB_HOST", "localhost"),
+			Port:            getEnv("DB_PORT", "5432"),
+			User:            getEnv("DB_USER", "aurganize"),
+			Password:        getEnv("DB_PASSWORD", ""),
+			DBName:          getEnv("DB_NAME", "aurganize_db_1"),
+			SSLMode:         getEnv("DB_SSLMODE", "disable"),
+			MaxOpenConns:    parseInt(getEnv("DB_MAX_OPEN_CONNS", "25")),
+			MaxIdleConns:    parseInt(getEnv("DB_MAX_IDLE_CONNS", "5")),
+			ConnMaxLifetime: parseDuration(getEnv("DB_CONN_MAX_LIFETIME", "5m")),
+		},
+
+		JWT: JWTConfig{
+			AccessSecret:  getEnv("JWT_ACCESS_SECRET", ""),
+			RefreshSecret: getEnv("JWT_REFRESH_SECRET", ""),
+			AccessExpiry:  parseDuration(getEnv("JWT_ACCESS_EXPIRY", "15m")),
+			RefreshExpiry: parseDuration(getEnv("JWT_REFRESH_EXPIRY", "168h")),
+			Issuer:        getEnv("JWT_ISSUER", "aurganize-v62"),
+		},
+		Redis: RedisConfig{
+			Host:     getEnv("REDIS_HOST", "localhost"),
+			Port:     getEnv("REDIS_PORT", "6379"),
+			Password: getEnv("REDIS_PASSWORD", ""),
+			DB:       parseInt(getEnv("REDIS_DB", "0")),
+		},
+		NATS: NATSConfig{
+			URL: getEnv("NATS_URL", "nats://localhost:4222"),
+		},
+		Storage: StorageConfig{
+			Endpoint:        getEnv("MINIO_ENDPOINT", "localhost:9000"),
+			AccessKeyID:     getEnv("MINIO_ACCESS_KEY", "minioadmin"),
+			SecretAccessKey: getEnv("MINIO_SECRET_KEY", "miniosecretkey"),
+			BucketName:      getEnv("MINIO_BUCKET", "aurganize_bucket_1"),
+			UseSSL:          parseBool(getEnv("MINIO_USE_SSL", "false")),
+		},
+	}
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("configuration validation failed: %w", err)
+	}
+
+	return cfg, nil
+}
+
+// Validate checks if all required configuration is present and valid
+func (c *Config) Validate() error {
+	// Database password is required in production
+	if c.Database.Password == "" && c.Server.Environment == "production" {
+		return fmt.Errorf("DB_PASSWORD is required in production")
+	}
+	// JWT secrets are always required
+	if c.JWT.AccessSecret == "" {
+		return fmt.Errorf("JWT_ACCESS_SECRET is required")
+	}
+
+	if c.JWT.RefreshSecret == "" {
+		return fmt.Errorf("JWT_REFRESH_SECRET is required")
+	}
+	// JWT secrets should be different
+	if c.JWT.AccessSecret == c.JWT.RefreshSecret {
+		return fmt.Errorf("JWT_ACCESS_SECRET and JWT_REFRESH_SECRET must be different")
+	}
+
+	validEnvs := map[string]bool{
+		"development": true,
+		"test":        true,
+		"staging":     true,
+		"UAT":         true,
+		"production":  true,
+	}
+	if !validEnvs[c.Server.Environment] {
+		return fmt.Errorf("invalid APP_ENV value: %q", c.Server.Environment)
+	}
+	return nil
+}
+
+// Helper Functions
+func getEnv(key, defaultValue string) string {
+	if value := os.Getenv(key); value != "" {
+		return value
+	}
+	return defaultValue
+}
+
+func parseDuration(s string) time.Duration {
+	d, err := time.ParseDuration(s)
+	if err != nil {
+		return 0
+	}
+	return d
+}
+
+func parseInt(s string) int {
+	i, err := strconv.Atoi(s)
+	if err != nil {
+		return 0
+	}
+	return i
+}
+
+func parseBool(s string) bool {
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return false
+	}
+	return b
+}
+
+// DatabaseDSN returns the PostgreSQL connection string
+func (c *Config) DatabaseDSN() string {
+	return fmt.Sprintf(
+		"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
+		c.Database.Host,
+		c.Database.Port,
+		c.Database.User,
+		c.Database.Password,
+		c.Database.DBName,
+		c.Database.SSLMode,
+	)
+}
+
+// RedisDSN returns the Redis connection string
+func (c *Config) RedisDSN() string {
+	return fmt.Sprintf("%s:%s", c.Redis.Host, c.Redis.Port)
+}
diff --git a/backend/pkg/logger/examples_test.go b/backend/pkg/logger/examples_test.go
new file mode 100644
index 0000000..3f857a9
--- /dev/null
+++ b/backend/pkg/logger/examples_test.go
@@ -0,0 +1,107 @@
+package logger_test
+
+import (
+	"time"
+
+	"github.com/creativenoz/aurganize-v62/backend/pkg/logger"
+	"github.com/rs/zerolog/log"
+)
+
+// Example_basicUsage demonstrates basic logger usage
+func Example_basicUsage() {
+	// Initialize logger
+	logger.Init("development")
+
+	// Log at different levels
+	log.Debug().Msg("This is debug information")
+	log.Info().Msg("This is informational")
+	log.Warn().Msg("This is a warning")
+	log.Error().Msg("This is an error")
+
+	// Output depends on log level
+}
+
+// Example_structuredLogging demonstrates structured logging with fields
+func Example_structuredLogging() {
+	logger.Init("development")
+
+	// Log with structured fields
+	log.Info().
+		Str("user_id", "12345").
+		Str("action", "login").
+		Bool("success", true).
+		Dur("duration", 150*time.Millisecond).
+		Msg("User login attempt")
+
+	// JSON output:
+	// {
+	//   "level": "info",
+	//   "user_id": "12345",
+	//   "action": "login",
+	//   "success": true,
+	//   "duration": 150,
+	//   "message": "User login attempt"
+	// }
+}
+
+// Example_contextLogger demonstrates creating logger with context
+func Example_contextLogger() {
+	logger.Init("development")
+
+	// Create logger with request context
+	requestLogger := logger.WithContext(map[string]interface{}{
+		"request_id": "req-abc-123",
+		"user_id":    "user-456",
+		"ip":         "192.168.1.1",
+	})
+
+	// All logs from this logger include context
+	requestLogger.Info().Msg("Request started")
+	requestLogger.Info().Msg("Processing payment")
+	requestLogger.Info().Msg("Request completed")
+
+	// All three logs include request_id, user_id, and ip
+}
+
+// Example_errorLogging demonstrates logging errors
+func Example_errorLogging() {
+	logger.Init("development")
+
+	// Simulate an error
+	err := someFunction()
+	if err != nil {
+		log.Error().
+			Err(err). // Add error
+			Str("user_id", "123").
+ Str("operation", "database_query"). + Msg("Failed to fetch user") + } +} + +// Example_subLogger demonstrates module-specific loggers +func Example_subLogger() { + logger.Init("development") + + // Create a sub-logger for authentication module + authLogger := log.With(). + Str("module", "auth"). + Str("version", "v1"). + Logger() + + authLogger.Info().Msg("Auth module initialized") + authLogger.Debug().Msg("Loading auth configuration") + + // Create a sub-logger for database module + dbLogger := log.With(). + Str("module", "database"). + Logger() + + dbLogger.Info().Msg("Database connection established") + + // Now you can filter logs by module in production +} + +// Helper function for example +func someFunction() error { + return nil +} diff --git a/backend/pkg/logger/logger.go b/backend/pkg/logger/logger.go new file mode 100644 index 0000000..4d83dca --- /dev/null +++ b/backend/pkg/logger/logger.go @@ -0,0 +1,208 @@ +package logger + +import ( + "io" + "os" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +const ( + LevelDebug = "debug" + LevelInfo = "info" + LevelWarn = "warn" + LevelError = "error" +) + +// Init initializes the global logger with environment-specific settings. +// Call this once at application startup. +// +// Environment determines output format: +// - development: Pretty console output with colors +// - staging/production: JSON output for log aggregation +// +// Example: +// +// logger.Init("development") +// log.Info().Msg("Application started") +func Init(enviroment string) { + + // Configuring the time format for loggin + // Unix style timestamp in production for efficiency + // Humar-readable in development + if enviroment == "production" { + // This is the unixtimestamp format used in production for efficiency : 1732632645 + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + } else { + zerolog.TimeFieldFormat = time.RFC3339 + // This is format for human readable format : 2025-11-26T10:30:45-05:00 + } + + var output io.Writer = os.Stdout + + if enviroment == "development" { + output = zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: time.RFC3339, // 2025-11-26T10:30:45-05:00 + NoColor: false, // Enabling Colours + } + } + + // Production uses default JSON output to stdout + + // Set global logger + // Caller() : this caused adtional overhead as runtime.Caller() is called, which is worth + // for debugging value, should disable in production + log.Logger = zerolog.New(output). + With(). + Timestamp(). + Caller(). // This line is to add file and line number information into the log + Logger() + + // set global log level + // Debug (most verbose) + // ↓ + // Info + // ↓ + // Warn + // ↓ + // Error + // ↓ + // Fatal (least verbose) + switch enviroment { + case "production": + zerolog.SetGlobalLevel(zerolog.InfoLevel) + case "UAT": + zerolog.SetGlobalLevel(zerolog.InfoLevel) + case "staging": + zerolog.SetGlobalLevel(zerolog.InfoLevel) + case "test": + zerolog.SetGlobalLevel(zerolog.DebugLevel) + case "development": + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + // Log initialization + log.Info(). + Str("environment", enviroment). + Str("log_level", zerolog.GlobalLevel().String()). 
+ Msg("logger initialized") +} + +func InitWithLevel(environment, level string) { + Init(environment) + + switch level { + case LevelDebug: + zerolog.SetGlobalLevel(zerolog.DebugLevel) + case LevelInfo: + zerolog.SetGlobalLevel(zerolog.InfoLevel) + case LevelWarn: + zerolog.SetGlobalLevel(zerolog.WarnLevel) + case LevelError: + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + default: + log.Warn(). + Str("provided_level", level). + Str("using_level", zerolog.GlobalLevel().String()). + Msg("Invalid log level, using default") + } + + log.Info(). + Str("environment", environment). + Str("log_level", zerolog.GlobalLevel().String()). + Msg("Logger initialized with custom level") +} + +// GetLogger returns the global logger instance. +// Use this to get a logger with additional context. +// +// Example : +// +// logger := logger.GetLogger(). +// With(). +// Str("module","auth"). +// Logger() +// logger.Info().Msg("Auth module started") +func GetLogger() *zerolog.Logger { + return &log.Logger +} + +// WithContext returns a logger with additional context fields. +// Useful for adding request-specific context. +// +// Example: +// +// contextLogger := logger.WithContext(map[string]interface{}{ +// "request_id": "abc-123", +// "user_id": "user-456", +// }) +// contextLogger.Info().Msg("Processing request") +func WithContext(fields map[string]interface{}) *zerolog.Logger { + logger := log.Logger + for key, value := range fields { + logger = logger.With().Interface(key, value).Logger() + } + + return &logger +} + +// Example Usage log functions (for familiarizing its usage) + +// LogDebug logs debug information (these are verbose, development only) +func ExampleDebug() { + log.Debug(). + Str("function", "ExampleDebug"). + Int("iteration", 1). + Msg("Debug information") +} + +func ExampleInfo() { + log.Info(). + Str("user_id", "123"). + Str("reason", "invalid_token"). + Msg("User logged in successfully") +} + +func ExampleError() { + log.Error(). + Err(nil). // we add the actual error here + Str("user_id", "123"). + Str("operation", "database_query"). + Msg("Failed to fetch user data") +} + +// Log Fatal logs fatal erros and exits the application +// user sparingly - only for unrecoverable errors +func ExmapleFatal() { + // log.Fatal(). + // Err(err). + // Msg("Cannot connect to database") + // -- Application exits after this +} + +// Log with fields demonstrated logging mutliple fields +func ExampleWithFields() { + log.Info(). + Str("user_id", "123"). + Str("email", "user@example.com"). + Int("login_attemtps", 3). + Bool("success", true). + Dur("duration", 150*time.Millisecond). + Msg("login completed") +} + +// Log with SubLogger demonstrates creating sub-loggers +func ExampleSubLogger() { + + // create a sub-logger for a specific module + authLogger := log.With(). + Str("module", "auth"). + Str("version", "v1"). 
+ Logger() + + authLogger.Info().Msg("Auth module initialized") + authLogger.Debug().Msg("Loading auth configuration") +} diff --git a/backend/pkg/logger/logger_test.go b/backend/pkg/logger/logger_test.go new file mode 100644 index 0000000..09b82e8 --- /dev/null +++ b/backend/pkg/logger/logger_test.go @@ -0,0 +1,212 @@ +package logger + +import ( + "bytes" + "testing" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/assert" +) + +// TestInit_Developement tests logger initialization in development mode +func TestInit_Development(t *testing.T) { + // Arrange + environment := "development" + + // Act + Init(environment) + + // Assert + assert.Equal(t, zerolog.DebugLevel, zerolog.GlobalLevel(), "Development should use Debug level") +} + +// TestInit_Production tests logger initialization in production mode +func TestInit_Production(t *testing.T) { + // Arrange + environment := "production" + + // Act + Init(environment) + + // Assert + assert.Equal(t, zerolog.InfoLevel, zerolog.GlobalLevel(), "Production should use Info level") +} + +func TestInitWithLevel(t *testing.T) { + tests := []struct { + name string + environment string + level string + expectedLevel zerolog.Level + }{ + { + name: "Debug level", + environment: "production", + level: LevelDebug, + expectedLevel: zerolog.DebugLevel, + }, + { + name: "Info level", + environment: "production", + level: LevelInfo, + expectedLevel: zerolog.InfoLevel, + }, + { + name: "Warn level", + environment: "production", + level: LevelWarn, + expectedLevel: zerolog.WarnLevel, + }, + { + name: "Error level", + environment: "production", + level: LevelError, + expectedLevel: zerolog.ErrorLevel, + }, + { + name: "Invalid level defaults to Info", + environment: "production", + level: "invalid", + expectedLevel: zerolog.InfoLevel, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Act + InitWithLevel(test.environment, test.level) + + // Assert + assert.Equal(t, test.expectedLevel, zerolog.GlobalLevel()) + }) + } +} + +// TestGetLogger tests getting logger instance +func TestGetLogger(t *testing.T) { + // Arrage + Init("development") + // Act + logger := GetLogger() + + // Assert + assert.NotNil(t, logger, "GetLogger should return non-nil logger") +} + +// TestWithContext tests logging with additional context +func TestWithContext(t *testing.T) { + // Arrange + var buff bytes.Buffer + log.Logger = zerolog.New(&buff) + + fields := map[string]interface{}{ + "request_id": "test-123", + "user_id": "user-456", + } + + // Act + logger := WithContext(fields) + logger.Info().Msg("Test message") + + // Assert + output := buff.String() + assert.Contains(t, output, "test-123", "should include the request_id") + assert.Contains(t, output, "user-456", "should include the user_id") + assert.Contains(t, output, "Test message", "should include message") +} + +// TestLogLevels tests that log levels fitler correctly +func TestLogLevels(t *testing.T) { + tests := []struct { + name string + setLevel zerolog.Level + logLevel zerolog.Level + shouldAppear bool + }{ + { + name: "Debug message appears when level is Debug", + setLevel: zerolog.DebugLevel, + logLevel: zerolog.DebugLevel, + shouldAppear: true, + }, + { + name: "Debug message hidden when level is Info", + setLevel: zerolog.InfoLevel, + logLevel: zerolog.DebugLevel, + shouldAppear: false, + }, + { + name: "Error message appears when level is Info", + setLevel: zerolog.InfoLevel, + logLevel: zerolog.ErrorLevel, + shouldAppear: true, + }, + } + for 
_, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Arrange + var buff bytes.Buffer + zerolog.SetGlobalLevel(test.setLevel) + log.Logger = zerolog.New(&buff) + + // Act + switch test.logLevel { + case zerolog.DebugLevel: + log.Debug().Msg("test message") + case zerolog.InfoLevel: + log.Info().Msg("test message") + case zerolog.ErrorLevel: + log.Error().Msg("test message") + } + + // Assert + output := buff.String() + if test.shouldAppear { + assert.Contains(t, output, "test message") + } else { + assert.Empty(t, output) + } + }) + } + +} + +// BenchmarkLogger benchmarks logger performance +func BenchmarkLogger(b *testing.B) { + // Setup + var buff bytes.Buffer + log.Logger = zerolog.New(&buff) + + b.ResetTimer() + + // Run Benchmark + + for i := 0; i < b.N; i++ { + log.Info(). + Str("key", "value"). + Int("number", 42). + Msg("Benchmark message") + } +} + +// Benchmark Logger with multiple fields +func BenchmarkLoggerWithFields(b *testing.B) { + // Setup + var buff bytes.Buffer + log.Logger = zerolog.New(&buff) + + b.ResetTimer() + + // Run Benchmark + for i := 0; i < b.N; i++ { + log.Info(). + Str("request_id", "abc-123"). + Str("user_id", "user-456"). + Str("method", "POST"). + Str("path", "/api/v1/users"). + Int("status", 200). + Dur("duration", 150). + Msg("Request completed") + } +} diff --git a/infrastructure/docker/docker-compose.yml b/infrastructure/docker/docker-compose.yml new file mode 100644 index 0000000..8a1940f --- /dev/null +++ b/infrastructure/docker/docker-compose.yml @@ -0,0 +1,25 @@ +version: '3.8' + +services: + postgres: + image: postgres:14-alpine + container_name: aurganize-postgres + restart: always + environment: + # Superuser credentials (for initialization only) + POSTGRES_USER: postgres + POSTGRES_PASSWORD: "dev_pass_aurganize@v6.2" + POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8" + ports: + - "5432:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + - ./init-scripts:/docker-entrypoint-initdb.d + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 10s + timeout: 5s + retries: 5 + +volumes: + postgres_data: \ No newline at end of file diff --git a/infrastructure/docker/init-scripts/01-create-users.sql b/infrastructure/docker/init-scripts/01-create-users.sql new file mode 100644 index 0000000..0505bff --- /dev/null +++ b/infrastructure/docker/init-scripts/01-create-users.sql @@ -0,0 +1,73 @@ +-- ========================================== +-- 01: CREATE APPLICATION USERS +-- ========================================== +-- This script creates PostgreSQL users for the application +-- Runs as: postgres (superuser) + +\echo '👤 Creating application users...' + +-- ========================================== +-- BACKEND API USER (Primary Application Role) +-- ========================================== +DO $$ +BEGIN + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'aurganize_backend_api') THEN + CREATE USER aurganize_backend_api WITH + PASSWORD 'dev_backend_pass_v6.2' -- CHANGE IN PRODUCTION! 
+ LOGIN + NOSUPERUSER + NOCREATEDB + NOCREATEROLE + NOREPLICATION + CONNECTION LIMIT 50; + + RAISE NOTICE '✅ User aurganize_backend_api created'; + RAISE NOTICE '⚠️ DEFAULT PASSWORD SET - CHANGE IN PRODUCTION!'; + ELSE + RAISE NOTICE '⚠️ User aurganize_backend_api already exists'; + END IF; +END +$$; + +-- ========================================== +-- READ-ONLY USER (Analytics/Reporting) +-- ========================================== +DO $$ +BEGIN + IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'aurganize_readonly') THEN + CREATE USER aurganize_readonly WITH + PASSWORD 'dev_readonly_pass_v6.2' -- CHANGE IN PRODUCTION! + LOGIN + NOSUPERUSER + NOCREATEDB + NOCREATEROLE + NOREPLICATION + CONNECTION LIMIT 10; + + RAISE NOTICE '✅ User aurganize_readonly created'; + ELSE + RAISE NOTICE '⚠️ User aurganize_readonly already exists'; + END IF; +END +$$; + +-- ========================================== +-- VERIFY USERS CREATED +-- ========================================== +\echo '' +\echo '📋 Verifying users...' +SELECT + rolname AS username, + rolcanlogin AS can_login, + rolconnlimit AS connection_limit, + CASE + WHEN rolsuper THEN 'superuser' + ELSE 'regular user' + END AS user_type +FROM pg_roles +WHERE rolname IN ('aurganize_backend_api', 'aurganize_readonly') +ORDER BY rolname; + +\echo '' +\echo '✅ Users created successfully' +\echo '' \ No newline at end of file diff --git a/infrastructure/docker/init-scripts/02-create-databases.sql b/infrastructure/docker/init-scripts/02-create-databases.sql new file mode 100644 index 0000000..7bf61ed --- /dev/null +++ b/infrastructure/docker/init-scripts/02-create-databases.sql @@ -0,0 +1,186 @@ +-- ========================================== +-- 02: CREATE DATABASES +-- ========================================== +-- This script creates development, staging, and production databases +-- Runs as: postgres (superuser) + +\echo '🗄️ Creating databases...' + +-- ========================================== +-- DEVELOPMENT DATABASE +-- ========================================== +CREATE DATABASE aurganize_dev + WITH + OWNER = aurganize_backend_api + ENCODING = 'UTF8' + LC_COLLATE = 'en_US.UTF-8' + LC_CTYPE = 'en_US.UTF-8' + TABLESPACE = pg_default + CONNECTION LIMIT = 50 + TEMPLATE = template0; + +COMMENT ON DATABASE aurganize_dev IS 'Aurganize V6.2 - Development Database'; + +\echo '✅ Database aurganize_dev created' + +-- ========================================== +-- STAGING DATABASE +-- ========================================== +CREATE DATABASE aurganize_staging + WITH + OWNER = aurganize_backend_api + ENCODING = 'UTF8' + LC_COLLATE = 'en_US.UTF-8' + LC_CTYPE = 'en_US.UTF-8' + TABLESPACE = pg_default + CONNECTION LIMIT = 50 + TEMPLATE = template0; + +COMMENT ON DATABASE aurganize_staging IS 'Aurganize V6.2 - Staging Database'; + +\echo '✅ Database aurganize_staging created' + +-- ========================================== +-- PRODUCTION DATABASE +-- ========================================== +CREATE DATABASE aurganize_prod + WITH + OWNER = aurganize_backend_api + ENCODING = 'UTF8' + LC_COLLATE = 'en_US.UTF-8' + LC_CTYPE = 'en_US.UTF-8' + TABLESPACE = pg_default + CONNECTION LIMIT = 100 + TEMPLATE = template0; + +COMMENT ON DATABASE aurganize_prod IS 'Aurganize V6.2 - Production Database'; + +\echo '✅ Database aurganize_prod created' + +-- ========================================== +-- GRANT PERMISSIONS - DEVELOPMENT DATABASE +-- ========================================== +\echo '' +\echo '🔐 Configuring permissions for aurganize_dev...' 
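+
+-- Note on the grant blocks below: GRANT ... ON ALL TABLES only covers objects that already
+-- exist, while ALTER DEFAULT PRIVILEGES covers objects created later -- and only objects
+-- created by the role that ran the ALTER (the superuser running this init script). A quick,
+-- optional check of the resulting default ACLs (a sketch, not part of the init flow):
+-- \ddp
+-- or equivalently:
+-- SELECT defaclrole::regrole AS owner, defaclobjtype, defaclacl FROM pg_default_acl;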
+ +\c aurganize_dev + +-- Grant schema usage +GRANT USAGE ON SCHEMA public TO aurganize_backend_api; + +-- Grant all permissions on tables (current and future) +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON TABLES TO aurganize_backend_api; + +-- Grant sequence permissions (for auto-increment IDs) +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON SEQUENCES TO aurganize_backend_api; + +-- Grant function execution +GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON FUNCTIONS TO aurganize_backend_api; + +-- Grant read-only access +GRANT CONNECT ON DATABASE aurganize_dev TO aurganize_readonly; +GRANT USAGE ON SCHEMA public TO aurganize_readonly; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO aurganize_readonly; + +-- Future tables +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT SELECT ON TABLES TO aurganize_readonly; + +\echo '✅ Permissions configured for aurganize_dev' + +-- ========================================== +-- GRANT PERMISSIONS - STAGING DATABASE +-- ========================================== +\echo '' +\echo '🔐 Configuring permissions for aurganize_staging...' + +\c aurganize_staging + +-- Grant schema usage +GRANT USAGE ON SCHEMA public TO aurganize_backend_api; + +-- Grant all permissions on tables (current and future) +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON TABLES TO aurganize_backend_api; + +-- Grant sequence permissions +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON SEQUENCES TO aurganize_backend_api; + +-- Grant function execution +GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON FUNCTIONS TO aurganize_backend_api; + +-- Grant read-only access +GRANT CONNECT ON DATABASE aurganize_staging TO aurganize_readonly; +GRANT USAGE ON SCHEMA public TO aurganize_readonly; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO aurganize_readonly; + +-- Future tables +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT SELECT ON TABLES TO aurganize_readonly; + +\echo '✅ Permissions configured for aurganize_staging' + +-- ========================================== +-- GRANT PERMISSIONS - PRODUCTION DATABASE +-- ========================================== +\echo '' +\echo '🔐 Configuring permissions for aurganize_prod...' 
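+
+-- Optional manual verification sketch (works against any of the three databases once the
+-- application schema exists; the table and column used are illustrative): the read-only
+-- role should be able to SELECT but not modify data.
+-- SET ROLE aurganize_readonly;
+-- SELECT count(*) FROM contracts;        -- allowed (SELECT granted)
+-- UPDATE contracts SET status = status;  -- expected: permission denied for table contracts
+-- RESET ROLE;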
+ +\c aurganize_prod + +-- Grant schema usage +GRANT USAGE ON SCHEMA public TO aurganize_backend_api; + +-- Grant all permissions on tables (current and future) +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON TABLES TO aurganize_backend_api; + +-- Grant sequence permissions +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON SEQUENCES TO aurganize_backend_api; + +-- Grant function execution +GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO aurganize_backend_api; +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT ALL PRIVILEGES ON FUNCTIONS TO aurganize_backend_api; + +-- Grant read-only access +GRANT CONNECT ON DATABASE aurganize_prod TO aurganize_readonly; +GRANT USAGE ON SCHEMA public TO aurganize_readonly; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO aurganize_readonly; + +-- Future tables +ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT SELECT ON TABLES TO aurganize_readonly; + +\echo '✅ Permissions configured for aurganize_prod' + +-- ========================================== +-- SUMMARY +-- ========================================== +\echo '' +\echo '==========================================' +\echo '✅ All databases created and configured!' +\echo '==========================================' +\echo '' +\echo 'Databases:' +\echo ' - aurganize_dev (development)' +\echo ' - aurganize_staging (staging)' +\echo ' - aurganize_prod (production)' +\echo '' +\echo 'Owners: aurganize_backend_api' +\echo 'Read-only access: aurganize_readonly' +\echo '' \ No newline at end of file diff --git a/infrastructure/docker/init-scripts/03-install-extenstions.sql b/infrastructure/docker/init-scripts/03-install-extenstions.sql new file mode 100644 index 0000000..4d8b9b0 --- /dev/null +++ b/infrastructure/docker/init-scripts/03-install-extenstions.sql @@ -0,0 +1,112 @@ +-- ========================================== +-- 03: INSTALL POSTGRESQL EXTENSIONS +-- ========================================== +-- This script installs required extensions on all databases +-- Runs as: postgres (superuser) + +\echo '🔌 Installing PostgreSQL extensions...' + +-- ========================================== +-- DEVELOPMENT DATABASE +-- ========================================== +\c aurganize_dev + +\echo '' +\echo '📦 Installing extensions on aurganize_dev...' 
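+
+-- How the extensions below are expected to be used by the application (a sketch; the index
+-- and query are illustrative and are not created by this script):
+--   uuid-ossp : uuid_generate_v4() for UUID primary key defaults
+--   pg_trgm   : trigram similarity for fuzzy search, typically paired with a GIN index:
+--               CREATE INDEX idx_contracts_title_trgm ON contracts USING gin (title gin_trgm_ops);
+--               SELECT title FROM contracts WHERE title % 'websit' ORDER BY similarity(title, 'websit') DESC;
+--   btree_gin : lets scalar columns (status, tenant_id, ...) participate in composite GIN indexes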
+ +-- UUID Generation (Required) +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" + WITH SCHEMA public + VERSION "1.1"; + +COMMENT ON EXTENSION "uuid-ossp" IS 'Generate universally unique identifiers (UUIDs)'; + +\echo ' ✅ uuid-ossp installed' + +-- Trigram Matching (For Fuzzy Search) +CREATE EXTENSION IF NOT EXISTS "pg_trgm" + WITH SCHEMA public; + +COMMENT ON EXTENSION "pg_trgm" IS 'Text similarity measurement and index searching based on trigrams'; + +\echo ' ✅ pg_trgm installed' + +-- BTree GIN Index (For Complex Queries) +CREATE EXTENSION IF NOT EXISTS "btree_gin" + WITH SCHEMA public; + +COMMENT ON EXTENSION "btree_gin" IS 'Support for indexing common datatypes in GIN'; + +\echo ' ✅ btree_gin installed' + +-- Test UUID generation +DO $$ +DECLARE + test_uuid UUID; +BEGIN + test_uuid := uuid_generate_v4(); + RAISE NOTICE ' 🧪 Test UUID generated: %', test_uuid; +END $$; + +-- Test trigram similarity +DO $$ +DECLARE + similarity_score FLOAT; +BEGIN + similarity_score := similarity('PostgreSQL', 'Postgres'); + RAISE NOTICE ' 🧪 Trigram similarity test: %', similarity_score; +END $$; + +-- ========================================== +-- STAGING DATABASE +-- ========================================== +\c aurganize_staging + +\echo '' +\echo '📦 Installing extensions on aurganize_staging...' + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public VERSION "1.1"; +CREATE EXTENSION IF NOT EXISTS "pg_trgm" WITH SCHEMA public; +CREATE EXTENSION IF NOT EXISTS "btree_gin" WITH SCHEMA public; + +\echo ' ✅ All extensions installed on staging' + +-- ========================================== +-- PRODUCTION DATABASE +-- ========================================== +\c aurganize_prod + +\echo '' +\echo '📦 Installing extensions on aurganize_prod...' + +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public VERSION "1.1"; +CREATE EXTENSION IF NOT EXISTS "pg_trgm" WITH SCHEMA public; +CREATE EXTENSION IF NOT EXISTS "btree_gin" WITH SCHEMA public; + +\echo ' ✅ All extensions installed on production' + +-- ========================================== +-- SUMMARY +-- ========================================== +\echo '' +\echo '==========================================' +\echo '✅ Extensions installed successfully!' +\echo '==========================================' +\echo '' + +-- List all extensions (on dev database) +\c aurganize_dev + +SELECT + e.extname AS "Extension", + e.extversion AS "Version", + n.nspname AS "Schema" +FROM + pg_extension e + JOIN pg_namespace n ON e.extnamespace = n.oid +WHERE + e.extname IN ('uuid-ossp', 'pg_trgm', 'btree_gin') +ORDER BY + e.extname; + +\echo '' \ No newline at end of file diff --git a/infrastructure/docker/init-scripts/04-setup-rls.sql b/infrastructure/docker/init-scripts/04-setup-rls.sql new file mode 100644 index 0000000..f72b51e --- /dev/null +++ b/infrastructure/docker/init-scripts/04-setup-rls.sql @@ -0,0 +1,397 @@ +-- ========================================== +-- 04: ROW-LEVEL SECURITY HELPER FUNCTIONS +-- ========================================== +-- This script creates utility functions for RLS management +-- Runs as: postgres (superuser) + +\echo '🔒 Creating RLS helper functions...' + +-- ========================================== +-- DEVELOPMENT DATABASE +-- ========================================== +\c aurganize_dev + +\echo '' +\echo '🔧 Creating helper functions on aurganize_dev...' 
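+
+-- Intended calling pattern for the helpers defined below (a sketch of how the backend is
+-- expected to drive them; the actual transaction handling lives in the Go service layer):
+--   BEGIN;
+--   SELECT set_tenant_context('550e8400-e29b-41d4-a716-446655440000'::UUID);
+--   -- ... tenant-scoped queries, all filtered by the RLS policies ...
+--   COMMIT;
+-- Because set_config(..., false) persists for the rest of the session, pooled connections
+-- must re-set (or reset) the tenant context at the start of every request, otherwise a
+-- reused connection can leak the previous request's tenant.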
+
+-- ==========================================
+-- FUNCTION: set_tenant_context
+-- Purpose: Sets the current tenant ID in session context
+-- ==========================================
+CREATE OR REPLACE FUNCTION set_tenant_context(p_tenant_id UUID)
+RETURNS VOID AS $$
+BEGIN
+    -- Set session variable with tenant ID
+    -- set_config(setting_name, new_value, is_local)
+    --   setting_name: 'app.current_tenant_id' (custom variable)
+    --   new_value: p_tenant_id as TEXT
+    --   is_local: false = persists for the rest of the session
+    --             true  = reverts at the end of the current transaction
+    PERFORM set_config('app.current_tenant_id', p_tenant_id::TEXT, false);
+
+    -- Log for debugging (only visible with client_min_messages = DEBUG)
+    RAISE DEBUG 'Tenant context set to: %', p_tenant_id;
+END;
+$$ LANGUAGE plpgsql SECURITY DEFINER;
+
+-- Grant execute to application user
+GRANT EXECUTE ON FUNCTION set_tenant_context(UUID) TO aurganize_backend_api;
+
+-- Add comment
+COMMENT ON FUNCTION set_tenant_context(UUID) IS
+    'Sets the current tenant context for Row-Level Security filtering. Call at the beginning of each request.';
+
+\echo ' ✅ Function set_tenant_context created'
+
+-- ==========================================
+-- FUNCTION: get_current_tenant
+-- Purpose: Returns the current tenant ID from session context
+-- ==========================================
+CREATE OR REPLACE FUNCTION get_current_tenant()
+RETURNS UUID AS $$
+BEGIN
+    -- current_setting(setting_name, missing_ok)
+    --   setting_name: 'app.current_tenant_id'
+    --   missing_ok: true = return NULL if not set (don't error)
+    RETURN current_setting('app.current_tenant_id', true)::UUID;
+EXCEPTION
+    WHEN OTHERS THEN
+        -- If conversion fails or any error, return NULL
+        RETURN NULL;
+END;
+$$ LANGUAGE plpgsql STABLE SECURITY DEFINER;
+
+-- Grant execute to application user
+GRANT EXECUTE ON FUNCTION get_current_tenant() TO aurganize_backend_api;
+
+-- Add comment
+COMMENT ON FUNCTION get_current_tenant() IS
+    'Returns the current tenant ID from session context. Returns NULL if not set.';
+
+\echo ' ✅ Function get_current_tenant created'
+
+-- ==========================================
+-- FUNCTION: create_tenant_isolation_policy
+-- Purpose: Creates standard RLS policy on a table
+-- ==========================================
+CREATE OR REPLACE FUNCTION create_tenant_isolation_policy(p_table_name TEXT)
+RETURNS VOID AS $$
+DECLARE
+    v_policy_name TEXT;
+BEGIN
+    -- Generate policy name
+    v_policy_name := p_table_name || '_tenant_isolation';
+
+    -- Enable RLS on table
+    EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name);
+
+    -- Drop policy if exists (for re-running)
+    EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name);
+
+    -- Create policy
+    -- USING clause: Controls which rows are visible for SELECT/UPDATE/DELETE
+    -- WITH CHECK clause: Controls which rows can be inserted/updated
+    EXECUTE format('
+        CREATE POLICY %I ON %I
+        USING (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)
+        WITH CHECK (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)
+    ', v_policy_name, p_table_name);
+
+    RAISE NOTICE '✅ Created RLS policy: % on table: %', v_policy_name, p_table_name;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Grant execute to application user
+GRANT EXECUTE ON FUNCTION create_tenant_isolation_policy(TEXT) TO aurganize_backend_api;
+
+-- Add comment
+COMMENT ON FUNCTION create_tenant_isolation_policy(TEXT) IS
+    'Creates a standard tenant isolation RLS policy on the specified table.
Table must have a tenant_id column.'; + +\echo ' ✅ Function create_tenant_isolation_policy created' + +-- ========================================== +-- FUNCTION: create_contract_participant_policy +-- Purpose: Creates RLS policy for contract-based access +-- ========================================== +CREATE OR REPLACE FUNCTION create_contract_participant_policy(p_table_name TEXT) +RETURNS VOID AS $$ +DECLARE + v_policy_name TEXT; +BEGIN + -- Generate policy name + v_policy_name := p_table_name || '_contract_access'; + + -- Enable RLS on table + EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name); + + -- Drop policy if exists + EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name); + + -- Create policy for contract-related tables + -- Allows access if tenant is vendor OR consumer in the contract + EXECUTE format(' + CREATE POLICY %I ON %I + USING ( + EXISTS ( + SELECT 1 FROM contracts + WHERE contracts.id = %I.contract_id + AND ( + contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID + OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID + ) + ) + ) + WITH CHECK ( + EXISTS ( + SELECT 1 FROM contracts + WHERE contracts.id = %I.contract_id + AND ( + contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID + OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID + ) + ) + ) + ', v_policy_name, p_table_name, p_table_name, p_table_name); + + RAISE NOTICE '✅ Created contract-based RLS policy: % on table: %', v_policy_name, p_table_name; +END; +$$ LANGUAGE plpgsql; + +-- Grant execute to application user +GRANT EXECUTE ON FUNCTION create_contract_participant_policy(TEXT) TO aurganize_backend_api; + +-- Add comment +COMMENT ON FUNCTION create_contract_participant_policy(TEXT) IS + 'Creates an RLS policy for contract-related tables. Grants access if tenant is vendor or consumer. 
Table must have a contract_id column.'; + +\echo ' ✅ Function create_contract_participant_policy created' + + +-- ========================================== +-- FUNCTION: audit_rls_status +-- Purpose: Audit and report RLS status across all tables +-- ========================================== +CREATE OR REPLACE FUNCTION audit_rls_status() +RETURNS TABLE ( + table_name TEXT, + rls_enabled BOOLEAN, + policy_count BIGINT, + status TEXT +) AS $$ +BEGIN + RETURN QUERY + SELECT + t.tablename::TEXT AS table_name, + t.rowsecurity AS rls_enabled, + COUNT(p.policyname) AS policy_count, + CASE + WHEN t.rowsecurity AND COUNT(p.policyname) > 0 THEN '✓ Protected' + WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN '⚠ Enabled but no policies' + ELSE '✗ Not protected' + END AS status + FROM pg_tables t + LEFT JOIN pg_policies p + ON p.tablename = t.tablename + AND p.schemaname = t.schemaname + WHERE t.schemaname = 'public' + AND t.tablename NOT LIKE 'pg_%' -- Exclude PostgreSQL system tables + AND t.tablename NOT LIKE 'sql_%' -- Exclude SQL standard tables + GROUP BY t.schemaname, t.tablename, t.rowsecurity + ORDER BY + CASE + WHEN NOT t.rowsecurity THEN 1 -- Not protected first + WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN 2 -- Enabled but no policies + ELSE 3 -- Protected + END, + t.tablename; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Grant execute to application user and readonly user +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_backend_api; +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_readonly; + +-- Add comment +COMMENT ON FUNCTION audit_rls_status() IS + 'Audits and reports Row-Level Security status for all tables in the public schema. Returns table name, RLS enabled status, policy count, and protection status.'; + +\echo ' ✅ Function audit_rls_status created' + + +-- ========================================== +-- STAGING DATABASE +-- ========================================== +\c aurganize_staging + +\echo '' +\echo '🔧 Creating helper functions on aurganize_staging...' 
+ +CREATE OR REPLACE FUNCTION set_tenant_context(p_tenant_id UUID) RETURNS VOID AS $$ +BEGIN + PERFORM set_config('app.current_tenant_id', p_tenant_id::TEXT, false); + RAISE DEBUG 'Tenant context set to: %', p_tenant_id; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION set_tenant_context(UUID) TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION get_current_tenant() RETURNS UUID AS $$ +BEGIN + RETURN current_setting('app.current_tenant_id', true)::UUID; +EXCEPTION WHEN OTHERS THEN RETURN NULL; +END; +$$ LANGUAGE plpgsql STABLE SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION get_current_tenant() TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION create_tenant_isolation_policy(p_table_name TEXT) RETURNS VOID AS $$ +DECLARE v_policy_name TEXT; +BEGIN + v_policy_name := p_table_name || '_tenant_isolation'; + EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name); + EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name); + EXECUTE format('CREATE POLICY %I ON %I USING (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID) WITH CHECK (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)', v_policy_name, p_table_name); + RAISE NOTICE '✅ Created RLS policy: % on table: %', v_policy_name, p_table_name; +END; +$$ LANGUAGE plpgsql; +GRANT EXECUTE ON FUNCTION create_tenant_isolation_policy(TEXT) TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION create_contract_participant_policy(p_table_name TEXT) RETURNS VOID AS $$ +DECLARE v_policy_name TEXT; +BEGIN + v_policy_name := p_table_name || '_contract_access'; + EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name); + EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name); + EXECUTE format('CREATE POLICY %I ON %I USING (EXISTS (SELECT 1 FROM contracts WHERE contracts.id = %I.contract_id AND (contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID))) WITH CHECK (EXISTS (SELECT 1 FROM contracts WHERE contracts.id = %I.contract_id AND (contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)))', v_policy_name, p_table_name, p_table_name, p_table_name); + RAISE NOTICE '✅ Created contract-based RLS policy: % on table: %', v_policy_name, p_table_name; +END; +$$ LANGUAGE plpgsql; +GRANT EXECUTE ON FUNCTION create_contract_participant_policy(TEXT) TO aurganize_backend_api; + +\echo ' ✅ All helper functions created on staging' + +-- ========================================== +-- FUNCTION: audit_rls_status +-- Purpose: Audit and report RLS status across all tables +-- ========================================== +CREATE OR REPLACE FUNCTION audit_rls_status() +RETURNS TABLE (table_name TEXT, rls_enabled BOOLEAN, policy_count BIGINT, status TEXT) AS $$ +BEGIN + RETURN QUERY + SELECT + t.tablename::TEXT, + t.rowsecurity, + COUNT(p.policyname), + CASE + WHEN t.rowsecurity AND COUNT(p.policyname) > 0 THEN '✓ Protected' + WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN '⚠ Enabled but no policies' + ELSE '✗ Not protected' + END + FROM pg_tables t + LEFT JOIN pg_policies p ON p.tablename = t.tablename AND p.schemaname = t.schemaname + WHERE t.schemaname = 'public' AND t.tablename NOT LIKE 'pg_%' AND t.tablename NOT LIKE 'sql_%' + GROUP BY t.schemaname, t.tablename, t.rowsecurity + ORDER BY CASE WHEN 
NOT t.rowsecurity THEN 1 WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN 2 ELSE 3 END, t.tablename; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_backend_api; +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_readonly; +COMMENT ON FUNCTION audit_rls_status() IS 'Audits and reports Row-Level Security status for all tables.'; + + +-- ========================================== +-- PRODUCTION DATABASE +-- ========================================== +\c aurganize_prod + +\echo '' +\echo '🔧 Creating helper functions on aurganize_prod...' + +CREATE OR REPLACE FUNCTION set_tenant_context(p_tenant_id UUID) RETURNS VOID AS $$ +BEGIN + PERFORM set_config('app.current_tenant_id', p_tenant_id::TEXT, false); + RAISE DEBUG 'Tenant context set to: %', p_tenant_id; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION set_tenant_context(UUID) TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION get_current_tenant() RETURNS UUID AS $$ +BEGIN + RETURN current_setting('app.current_tenant_id', true)::UUID; +EXCEPTION WHEN OTHERS THEN RETURN NULL; +END; +$$ LANGUAGE plpgsql STABLE SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION get_current_tenant() TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION create_tenant_isolation_policy(p_table_name TEXT) RETURNS VOID AS $$ +DECLARE v_policy_name TEXT; +BEGIN + v_policy_name := p_table_name || '_tenant_isolation'; + EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name); + EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name); + EXECUTE format('CREATE POLICY %I ON %I USING (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID) WITH CHECK (tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)', v_policy_name, p_table_name); + RAISE NOTICE '✅ Created RLS policy: % on table: %', v_policy_name, p_table_name; +END; +$$ LANGUAGE plpgsql; +GRANT EXECUTE ON FUNCTION create_tenant_isolation_policy(TEXT) TO aurganize_backend_api; + +CREATE OR REPLACE FUNCTION create_contract_participant_policy(p_table_name TEXT) RETURNS VOID AS $$ +DECLARE v_policy_name TEXT; +BEGIN + v_policy_name := p_table_name || '_contract_access'; + EXECUTE format('ALTER TABLE %I ENABLE ROW LEVEL SECURITY', p_table_name); + EXECUTE format('DROP POLICY IF EXISTS %I ON %I', v_policy_name, p_table_name); + EXECUTE format('CREATE POLICY %I ON %I USING (EXISTS (SELECT 1 FROM contracts WHERE contracts.id = %I.contract_id AND (contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID))) WITH CHECK (EXISTS (SELECT 1 FROM contracts WHERE contracts.id = %I.contract_id AND (contracts.vendor_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID OR contracts.consumer_tenant_id = current_setting(''app.current_tenant_id'', true)::UUID)))', v_policy_name, p_table_name, p_table_name, p_table_name); + RAISE NOTICE '✅ Created contract-based RLS policy: % on table: %', v_policy_name, p_table_name; +END; +$$ LANGUAGE plpgsql; +GRANT EXECUTE ON FUNCTION create_contract_participant_policy(TEXT) TO aurganize_backend_api; + +\echo ' ✅ All helper functions created on production' + +-- ========================================== +-- FUNCTION: audit_rls_status +-- Purpose: Audit and report RLS status across all tables +-- ========================================== +CREATE OR REPLACE FUNCTION audit_rls_status() +RETURNS TABLE (table_name TEXT, 
rls_enabled BOOLEAN, policy_count BIGINT, status TEXT) AS $$ +BEGIN + RETURN QUERY + SELECT + t.tablename::TEXT, + t.rowsecurity, + COUNT(p.policyname), + CASE + WHEN t.rowsecurity AND COUNT(p.policyname) > 0 THEN '✓ Protected' + WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN '⚠ Enabled but no policies' + ELSE '✗ Not protected' + END + FROM pg_tables t + LEFT JOIN pg_policies p ON p.tablename = t.tablename AND p.schemaname = t.schemaname + WHERE t.schemaname = 'public' AND t.tablename NOT LIKE 'pg_%' AND t.tablename NOT LIKE 'sql_%' + GROUP BY t.schemaname, t.tablename, t.rowsecurity + ORDER BY CASE WHEN NOT t.rowsecurity THEN 1 WHEN t.rowsecurity AND COUNT(p.policyname) = 0 THEN 2 ELSE 3 END, t.tablename; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_backend_api; +GRANT EXECUTE ON FUNCTION audit_rls_status() TO aurganize_readonly; +COMMENT ON FUNCTION audit_rls_status() IS 'Audits and reports Row-Level Security status for all tables.'; + + +-- ========================================== +-- SUMMARY +-- ========================================== +\echo '' +\echo '==========================================' +\echo '✅ RLS helper functions created!' +\echo '==========================================' +\echo '' +\echo 'Functions created on all databases:' +\echo ' - set_tenant_context(UUID) Set current tenant' +\echo ' - get_current_tenant() Get current tenant' +\echo ' - create_tenant_isolation_policy(TEXT) Apply RLS to table' +\echo ' - create_contract_participant_policy(TEXT) Contract-based RLS' +\echo '' +\echo 'Usage example:' +\echo ' SELECT set_tenant_context(''550e8400-e29b-41d4-a716-446655440000''::UUID);' +\echo ' SELECT create_tenant_isolation_policy(''users'');' +\echo '' \ No newline at end of file diff --git a/infrastructure/docker/init-scripts/05-test-rls.sql b/infrastructure/docker/init-scripts/05-test-rls.sql new file mode 100644 index 0000000..332df05 --- /dev/null +++ b/infrastructure/docker/init-scripts/05-test-rls.sql @@ -0,0 +1,212 @@ +-- ========================================== +-- 05: TEST ROW-LEVEL SECURITY (OPTIONAL) +-- ========================================== +-- This script tests RLS functionality +-- Comment out this file in production! +-- Runs as: postgres (superuser) + +\echo '' +\echo '⚠️ WARNING: This is a test script!' +\echo '⚠️ Remove or comment out for production!' +\echo '' +\echo '🧪 Testing Row-Level Security...' + +-- ========================================== +-- Connect to development database +-- ========================================== +\c aurganize_dev + +-- ========================================== +-- CREATE TEST TABLE +-- ========================================== +\echo '' +\echo '📋 Creating test table...' + +DROP TABLE IF EXISTS rls_test CASCADE; + +CREATE TABLE rls_test ( + id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), + tenant_id UUID NOT NULL, + data TEXT +); + +\echo ' ✅ Table rls_test created' + +-- ========================================== +-- APPLY RLS POLICY +-- ========================================== +\echo '' +\echo '🔒 Applying RLS policy...' + +SELECT create_tenant_isolation_policy('rls_test'); + +\echo ' ✅ RLS policy applied' + +-- ========================================== +-- INSERT TEST DATA +-- ========================================== +\echo '' +\echo '📝 Inserting test data...' 
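+-- Note: this step still runs as the postgres superuser, which is also the owner
+-- of rls_test. Superusers (and table owners, unless FORCE ROW LEVEL SECURITY is
+-- set) bypass RLS, so rows for both tenants can be seeded here without setting
+-- any tenant context first.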
+ +INSERT INTO rls_test (tenant_id, data) VALUES + ('550e8400-e29b-41d4-a716-446655440000', 'Tenant A - Record 1'), + ('550e8400-e29b-41d4-a716-446655440000', 'Tenant A - Record 2'), + ('660e8400-e29b-41d4-a716-446655440001', 'Tenant B - Record 1'), + ('660e8400-e29b-41d4-a716-446655440001', 'Tenant B - Record 2'); + +\echo ' ✅ Inserted 4 records (2 per tenant)' + +-- ========================================== +-- TEST AS SUPERUSER (Should see all) +-- ========================================== +\echo '' +\echo '👑 Testing as superuser (should see ALL records)...' + +SELECT COUNT(*) AS total_records FROM rls_test; + +\echo '' + +-- ========================================== +-- TEST AS APPLICATION USER +-- ========================================== +\echo '' +\echo '👤 Testing as application user...' +\echo '' + +-- Reconnect as application user +\c aurganize_dev aurganize_backend_api + +-- ========================================== +-- TEST 1: Set Tenant A Context +-- ========================================== +\echo '🔧 Test 1: Setting Tenant A context...' +SELECT set_tenant_context('550e8400-e29b-41d4-a716-446655440000'::UUID); + +\echo '📊 Querying (should see 2 Tenant A records only):' +SELECT id, tenant_id, data FROM rls_test ORDER BY data; + +\echo '' + +-- Verify current tenant +\echo '🔍 Current tenant:' +SELECT get_current_tenant() AS current_tenant; + +\echo '' + +-- ========================================== +-- TEST 2: Switch to Tenant B +-- ========================================== +\echo '🔧 Test 2: Switching to Tenant B context...' +SELECT set_tenant_context('660e8400-e29b-41d4-a716-446655440001'::UUID); + +\echo '📊 Querying (should see 2 Tenant B records only):' +SELECT id, tenant_id, data FROM rls_test ORDER BY data; + +\echo '' + +-- ========================================== +-- TEST 3: Try to Insert with Wrong Tenant ID +-- ========================================== +\echo '🧪 Test 3: Attempting to insert with WRONG tenant_id (should FAIL)...' + +DO $$ +BEGIN + INSERT INTO rls_test (tenant_id, data) + VALUES ('550e8400-e29b-41d4-a716-446655440000', 'Hacked!'); + + RAISE EXCEPTION 'ERROR: Insert succeeded when it should have failed!'; +EXCEPTION + WHEN others THEN + IF SQLERRM LIKE '%violates row-level security policy%' THEN + RAISE NOTICE ' ✅ PASS: Insert correctly blocked by RLS'; + ELSE + RAISE NOTICE ' ❌ FAIL: Unexpected error: %', SQLERRM; + END IF; +END; +$$; + +\echo '' + +-- ========================================== +-- TEST 4: Insert with Correct Tenant ID +-- ========================================== +\echo '🧪 Test 4: Inserting with CORRECT tenant_id (should SUCCEED)...' + +INSERT INTO rls_test (tenant_id, data) +VALUES ('660e8400-e29b-41d4-a716-446655440001', 'Valid insert'); + +\echo ' ✅ PASS: Insert succeeded' + +\echo '' +\echo '📊 Query again (should now see 3 Tenant B records):' +SELECT id, tenant_id, data FROM rls_test ORDER BY data; + +\echo '' + +-- ========================================== +-- TEST 5: Try to Update to Different Tenant +-- ========================================== +\echo '🧪 Test 5: Attempting to UPDATE to different tenant_id (should FAIL)...' 
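+-- RLS evaluates the policy's WITH CHECK expression against the NEW row values on
+-- UPDATE as well as INSERT. The 'Valid insert' row is visible (it passes the USING
+-- clause for Tenant B), but rewriting its tenant_id to Tenant A makes the new row
+-- fail WITH CHECK, so the UPDATE below should be rejected.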
+ +DO $$ +BEGIN + UPDATE rls_test + SET tenant_id = '550e8400-e29b-41d4-a716-446655440000' + WHERE data = 'Valid insert'; + + RAISE EXCEPTION 'ERROR: Update succeeded when it should have failed!'; +EXCEPTION + WHEN others THEN + IF SQLERRM LIKE '%violates row-level security policy%' THEN + RAISE NOTICE ' ✅ PASS: Update correctly blocked by RLS'; + ELSE + RAISE NOTICE ' ❌ FAIL: Unexpected error: %', SQLERRM; + END IF; +END; +$$; + +\echo '' + +-- ========================================== +-- TEST 6: Try to See All Records +-- ========================================== +\echo '🧪 Test 6: Verifying data is still filtered...' +\echo '📊 Count (should still be 3 Tenant B records only):' + +SELECT COUNT(*) AS visible_records FROM rls_test; + +\echo '' + +-- ========================================== +-- CLEANUP +-- ========================================== +\echo '🧹 Cleaning up...' + +-- Reconnect as superuser +\c aurganize_dev postgres + +-- Drop test table +DROP TABLE IF EXISTS rls_test CASCADE; + +\echo ' ✅ Test table dropped' + +-- ========================================== +-- TEST SUMMARY +-- ========================================== +\echo '' +\echo '==========================================' +\echo '✅ RLS TESTING COMPLETE!' +\echo '==========================================' +\echo '' +\echo 'Test Results:' +\echo ' ✅ Tenant isolation works correctly' +\echo ' ✅ Cannot insert with wrong tenant_id' +\echo ' ✅ Can insert with correct tenant_id' +\echo ' ✅ Cannot update to different tenant_id' +\echo ' ✅ Queries are properly filtered' +\echo '' +\echo '🔒 Row-Level Security is functioning as expected!' +\echo '' +\echo '⚠️ REMINDER: Remove or comment out this test' +\echo ' script for production deployment!' +\echo '' \ No newline at end of file
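+
+-- ------------------------------------------
+-- Application usage (sketch only, nothing below is executed):
+-- set_tenant_context() stores the tenant id with session scope (is_local = false),
+-- so with a pooled connection the value survives across requests on the same
+-- connection unless the application resets it. One possible pattern is to set the
+-- context transaction-locally instead, so it is discarded at COMMIT/ROLLBACK:
+--
+--   BEGIN;
+--   SELECT set_config('app.current_tenant_id',
+--                     '550e8400-e29b-41d4-a716-446655440000', true);  -- true = transaction-local
+--   SELECT * FROM some_tenant_scoped_table;  -- only that tenant's rows are visible
+--   COMMIT;                                   -- the setting disappears with the transaction
+-- ------------------------------------------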