// internal/database/ops_logs.go
package database import ( "fmt" "strings" "time" "github.com/gocql/gocql" "golang.org/x/exp/rand" ) func (s *service) BatchInsertLogs(logs []Log) error { batch := s.session.NewBatch(gocql.LoggedBatch) for _, log := range logs { batch.Query(` INSERT INTO logs ( application_id, timestamp, log_id, user_id, log_level, message ) VALUES (?, ?, ?, ?, ?, ?)`, log.ApplicationID, log.Timestamp, log.LogID, log.UserID, log.Level, log.Message, ) } if err := s.session.ExecuteBatch(batch); err != nil { return fmt.Errorf("failed to batch insert logs: %w", err) } return nil } func (s *service) GetRecentLogs(filter LogsFilter) ([]Log, error) { if filter.PageSize <= 0 || filter.PageSize > 100 { filter.PageSize = 100 } // Build the query dynamically based on filters query := "SELECT application_id, timestamp, log_id, user_id, log_level, message FROM logs WHERE application_id = ?" args := []interface{}{filter.ApplicationID} // Add timestamp conditions if filter.Cursor != nil { query += " AND timestamp < ?" args = append(args, *filter.Cursor) } if filter.StartTime != nil { query += " AND timestamp >= ?" args = append(args, *filter.StartTime) } if filter.EndTime != nil { query += " AND timestamp <= ?" args = append(args, *filter.EndTime) } // Add log level filter if specified if filter.LogLevel != "" { query += " AND log_level = ?" args = append(args, filter.LogLevel) } // Add ordering and limit query += " ORDER BY timestamp DESC LIMIT ?" 
args = append(args, filter.PageSize) // Execute query with ALLOW FILTERING iter := s.session.Query(query+" ALLOW FILTERING", args...).Iter() var logs []Log var log Log for iter.Scan( &log.ApplicationID, &log.Timestamp, &log.LogID, &log.UserID, &log.Level, &log.Message, ) { logs = append(logs, log) } if err := iter.Close(); err != nil { return nil, fmt.Errorf("error fetching logs: %w", err) } return logs, nil } func (s *service) ValidateApplicationKey(keyHash string) (*Application, error) { var app Application // Query the applications table using the key hash if err := s.session.Query(` SELECT id, user_id, name, description, key_hash, created_at, updated_at FROM applications WHERE key_hash = ? ALLOW FILTERING`, keyHash, ).Scan( &app.ID, &app.UserID, &app.Name, &app.Description, &app.KeyHash, &app.CreatedAt, &app.UpdatedAt, ); err != nil { if err == gocql.ErrNotFound { return nil, fmt.Errorf("invalid API key") } return nil, fmt.Errorf("error validating API key: %w", err) } return &app, nil } func (s *service) GenerateDummyLogs(applicationID gocql.UUID) (int, error) { // Get the application to ensure it exists and get its user ID var app Application if err := s.session.Query(` SELECT id, user_id FROM applications WHERE id = ? 
`, applicationID).Scan(&app.ID, &app.UserID); err != nil { return 0, fmt.Errorf("application not found: %w", err) } // Generate 30 logs spanning the last 5 minutes now := time.Now() startTime := now.Add(-5 * time.Minute) batch := s.session.NewBatch(gocql.UnloggedBatch) // Sample messages and levels for variety messages := []string{ "User authentication successful", "Database connection established", "API request processed successfully", "Cache miss for key: %s", "Background job completed in %dms", "Memory usage: %d MB", "Request failed with status code: %d", "Rate limit exceeded for IP: %s", "Configuration reload initiated", "File upload completed: %s", } levelWeights := map[string]int{ "DEBUG": 15, "INFO": 60, "WARN": 15, "ERROR": 8, "FATAL": 2, } // Random data for interpolation sampleIPs := []string{"192.168.1.1", "10.0.0.1", "172.16.0.1", "8.8.8.8"} sampleFiles := []string{"user.jpg", "data.csv", "config.json", "backup.zip"} sampleKeys := []string{"user:1234", "session:5678", "settings:9012", "temp:3456"} for i := 0; i < 30; i++ { // Calculate timestamp with even distribution over 5 minutes progress := float64(i) / 30.0 timestamp := startTime.Add(time.Duration(progress * 5 * float64(time.Minute))) // Select log level based on weights rand.Seed(uint64(time.Now().UnixNano())) r := rand.Intn(100) var level string sum := 0 for l, weight := range levelWeights { sum += weight if r < sum { level = l break } } // Select and format message msgTemplate := messages[rand.Intn(len(messages))] var msg string switch { case strings.Contains(msgTemplate, "IP:"): msg = fmt.Sprintf(msgTemplate, sampleIPs[rand.Intn(len(sampleIPs))]) case strings.Contains(msgTemplate, "File"): msg = fmt.Sprintf(msgTemplate, sampleFiles[rand.Intn(len(sampleFiles))]) case strings.Contains(msgTemplate, "key:"): msg = fmt.Sprintf(msgTemplate, sampleKeys[rand.Intn(len(sampleKeys))]) case strings.Contains(msgTemplate, "MB"): msg = fmt.Sprintf(msgTemplate, rand.Intn(1000)) case 
strings.Contains(msgTemplate, "ms"): msg = fmt.Sprintf(msgTemplate, rand.Intn(500)) case strings.Contains(msgTemplate, "status code:"): codes := []int{400, 401, 403, 404, 500, 502, 503} msg = fmt.Sprintf(msgTemplate, codes[rand.Intn(len(codes))]) default: msg = msgTemplate } // Add to batch batch.Query(` INSERT INTO logs ( application_id, timestamp, log_id, user_id, log_level, message ) VALUES (?, ?, ?, ?, ?, ?)`, applicationID, timestamp, gocql.TimeUUID(), app.UserID, level, msg, ) } if err := s.session.ExecuteBatch(batch); err != nil { return 0, fmt.Errorf("failed to insert dummy logs: %w", err) } return 30, nil } |