diff --git a/benchmarks.go b/benchmarks.go
index 8fad4b1..eb85438 100644
--- a/benchmarks.go
+++ b/benchmarks.go
@@ -225,6 +225,10 @@ func postBenchmarkCreate(c *gin.Context) {
 		return
 	}
 
+	if openaiClient != nil {
+		go generateSummary(&benchmark, csvFiles)
+	}
+
 	// Redirect to the newly created benchmark using GET request
 	c.Redirect(http.StatusSeeOther, fmt.Sprintf("/benchmark/%d", benchmark.ID))
 }
@@ -366,7 +370,10 @@ func getBenchmark(c *gin.Context) {
 		return
 	}
 
-	getAISummary(benchmarkDatas, benchmark.Title, benchmark.Description, "", "")
+	if benchmark.AiSummary == "" && openaiClient != nil {
+		go generateSummary(&benchmark, benchmarkDatas)
+		benchmark.AiSummary = "AI summary is being generated... Refresh the page later."
+	}
 
 	c.HTML(http.StatusOK, "benchmark.tmpl", gin.H{
 		"activePage": "benchmark",
diff --git a/config.go b/config.go
index 6799d71..7f95013 100644
--- a/config.go
+++ b/config.go
@@ -8,47 +8,40 @@ import (
 )
 
 type Config struct {
-	Bind    string
-	DataDir string
+	Bind          string
+	DataDir       string
+	SessionSecret string
 
 	DiscordClientID     string
 	DiscordClientSecret string
 	DiscordRedirectURL  string
-	SessionSecret       string
-	OpenAIApiKey        string
-	OpenAIModel         string
+
+	OpenAIURL    string
+	OpenAIApiKey string
+	OpenAIModel  string
 
 	Version bool
}
 
 func NewConfig() (*Config, error) {
-	// Define the flags
-	bind := flag.String("bind", "0.0.0.0:8080", "Bind address and port")
-	dataDir := flag.String("data-dir", "/data", "Path where data would be stored")
-	discordClientID := flag.String("discord-client-id", "", "Discord OAuth2 client ID (see https://discord.com/developers/applications)")
-	discordClientSecret := flag.String("discord-client-secret", "", "Discord OAuth2 client secret (see https://discord.com/developers/applications)")
-	discordRedirectURL := flag.String("discord-redirect-url", "", "Discord OAuth2 redirect URL (<scheme>://<host>/login/callback)")
-	sessionSecret := flag.String("session-secret", "", "Session secret")
-	openaiApiKey := flag.String("openai-api-key", "", "OpenAI API Key (optional, leave empty to disable OpenAI integration)")
-	openaiModel := flag.String("openai-model", "", "OpenAI model ID (optional, leave empty to use the default model)")
-	flagVersion := flag.Bool("version", false, "prints version of the application")
+	config := &Config{}
+
+	flag.StringVar(&config.Bind, "bind", "0.0.0.0:8080", "Bind address and port")
+	flag.StringVar(&config.DataDir, "data-dir", "/data", "Path where data would be stored")
+	flag.StringVar(&config.SessionSecret, "session-secret", "", "Session secret")
+
+	flag.StringVar(&config.DiscordClientID, "discord-client-id", "", "Discord OAuth2 client ID (see https://discord.com/developers/applications)")
+	flag.StringVar(&config.DiscordClientSecret, "discord-client-secret", "", "Discord OAuth2 client secret (see https://discord.com/developers/applications)")
+	flag.StringVar(&config.DiscordRedirectURL, "discord-redirect-url", "", "Discord OAuth2 redirect URL (<scheme>://<host>/login/callback)")
+
+	flag.StringVar(&config.OpenAIURL, "openai-url", "https://api.openai.com/v1", "OpenAI API URL")
+	flag.StringVar(&config.OpenAIModel, "openai-model", "gpt-4o", "OpenAI model ID")
+	flag.StringVar(&config.OpenAIApiKey, "openai-api-key", "", "OpenAI API Key (leave empty to disable OpenAI integration)")
+
+	flag.BoolVar(&config.Version, "version", false, "prints version of the application")
 
 	envflag.Parse(envflag.WithPrefix("FS_"))
 
-	// Assign the parsed flag values to the Config struct
-	config := &Config{
-		Bind:                *bind,
-		DataDir:             *dataDir,
-		DiscordClientID:     *discordClientID,
-		DiscordClientSecret: *discordClientSecret,
-		DiscordRedirectURL:  *discordRedirectURL,
-		SessionSecret:       *sessionSecret,
-		OpenAIApiKey:        *openaiApiKey,
-		OpenAIModel:         *openaiModel,
-
-		Version: *flagVersion,
-	}
-
 	if config.Version {
 		return config, nil
 	}
@@ -68,8 +61,13 @@ func NewConfig() (*Config, error) {
 	if config.SessionSecret == "" {
 		return nil, errors.New("missing session-secret argument")
 	}
-	if (config.OpenAIApiKey == "" && config.OpenAIModel != "") || (config.OpenAIApiKey != "" && config.OpenAIModel == "") {
-		return nil, errors.New("openai-api-key and openai-model must be both empty or both non-empty")
+	if config.OpenAIApiKey != "" {
+		if config.OpenAIModel == "" {
+			return nil, errors.New("missing openai-model argument")
+		}
+		if config.OpenAIURL == "" {
+			return nil, errors.New("missing openai-url argument")
+		}
 	}
 
 	return config, nil
diff --git a/models.go b/models.go
index d80454f..4d5c3db 100644
--- a/models.go
+++ b/models.go
@@ -18,6 +18,7 @@ type Benchmark struct {
 	UserID      uint
 	Title       string
 	Description string
+	AiSummary   string
 
 	CreatedAtHumanized string `gorm:"-"` // Human readable "X h/m/s ago" version of CreatedAt (filled automatically)
diff --git a/openai.go b/openai.go
index 52b3629..40a0c81 100644
--- a/openai.go
+++ b/openai.go
@@ -2,56 +2,77 @@ package flightlesssomething
 
 import (
 	"context"
-	"fmt"
+	"log"
 	"math"
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 
 	openai "github.com/sashabaranov/go-openai"
 )
 
 const systemMessage = `
-You are given a summary of PC benchmark data. Your task is to provide conclusion and overview of the given data:
+You are given PC benchmark data of several runs. All this data is visible on the website in the form of charts, and your goal is to provide insights.
 
-0. Your summary must consist of max 3 segments - "Highest and Smoothest FPS", "Anomalies" and "Summary".
-1. Provide which run has the highest (average) fps and which has the smoothest fps (based on fps/frametime std.dev. and variance). Do not hesitate to mention multiple runs if they are incredibly similar. Also provide overall the best run. Try to understand which one has the best sweet "average" in terms of being smoothest and highest FPS.
-2. Anomalies in the data (if any). For example, if all benchmarks uses the same hardware/software? Or of certain run has lower/higher FPS that correlates to higher/lower VRAM usage, core clock, mem clock, etc. Try to figure out why is it so, by looking ONLY at the provided data. Do NOT mention anything if it's not an anomaly.
-3. If certain run had much worse FPS/Frametime than others, then exclude it from consideration in point 1. In point 2, try to figure out why it is so (first consider GPU VRAM, core clock, mem clock, then RAM/SWAP and other factors, while lastly CPU and GPU usage). If you can't figure out why, then just say so.
-4. Point 3 must be your TOP priority. Do NOT provide any other information than requested.
-5. You can mention labels in a natural way. E.g. you can call "lavd-defaults" just "LAVD" (if this makes sense).
-6. Use bullet points for point 1 and 2. Use paragraph for point 3.
-7. NEVER provide actual number or "higher/lower than". Instead, ALWAYS provide exact/approximate percentage in comparison to others.
-8. NEVER guess the issue outside of the provided data. If you can't figure out why, then just say so.
-9. ALWAYS mention in "anomalies" if certain run has correlation of higher/lower FPS with certain metrics (e.g. VRAM usage, core clock, mem clock, ram, swap, cpu, gpu). Only mention if there is significant correlation, at least 5 percent.
-10. Provide an extended summary overview of all runs, but avoid repeating yourself of what you mentioned in point 1 and 2.
-
-Do not provide numbers or visualize anything - user can already see charts.
+You MUST:
+1. Write at most 3 sections (headers) - "Top runs", "Issues" (optional) and "Summary".
+2. In the Issues section, figure out if any of the runs is significantly worse than the others in the same benchmark. You MUST use ONLY the data provided to explain the difference, and your points must be based only on the data provided. If there are no issues - do not write this section. Do not make any guesses. Additional requirements: (a) validate if the same hardware/software was used (by using the provided text fields, NOT the data), (b) do not speculate, but use numbers to back up your claims, (c) only write about actual FPS issues (everything else is just additional information).
+3. In the Top runs section, state which run has the highest (average) FPS ("Highest FPS"), which has the "Smoothest FPS" (LOWEST std.dev. and variance of the FPS value - LOWEST, NOT HIGHEST) and which is the "Best overall" (prefer lower std.dev./variance over higher FPS, but if a slight decrease in stability gives significantly higher FPS - pick that one). NEVER consider runs that have significantly lower FPS or have other significant issues. Exclude runs from consideration if they are significantly worse than the rest (as they would be mentioned in the Issues section). Note that your goal is to pick winners in this section, not to do a comparison. Include numbers to justify your claims.
+4. In the Summary section, provide an overview of all runs. Mention which runs are similar and which are different. Mention which runs are better in terms of FPS and which are better in terms of stability. Mention if there are any issues and what could be the reason for them. In short - summarize the whole benchmark.
+5. The first 2 sections should be bullet points, no subpoints, only 1 bullet point per point, while the Summary should be a single paragraph.
+6. NEVER use actual numbers. Instead, use percentages in comparison to other runs.
+7. Use markdown, and use code syntax for labels.
 `
 
-func getAISummary(bds []*BenchmarkData, bdTitle, bdDescription, openaiApiKey, openaiModel string) (string, error) {
-	userPrompt := writeAIPrompt(bds, bdTitle, bdDescription)
-	fmt.Println(userPrompt)
+var (
+	inProgressSummaries    = map[uint]struct{}{}
+	inProgressSummariesMux = &sync.Mutex{}
+)
 
-	return "", nil
+func generateSummary(b *Benchmark, bds []*BenchmarkData) {
+	// Do nothing if OpenAI integration is disabled
+	if openaiClient == nil {
+		return
+	}
 
-	client := openai.NewClient(openaiApiKey)
-	resp, err := client.CreateChatCompletion(
+	// Lock mutex, as generation might already be in progress
+	inProgressSummariesMux.Lock()
+
+	// Check if generation is already in progress
+	if _, ok := inProgressSummaries[b.ID]; ok {
+		inProgressSummariesMux.Unlock()
+		return
+	}
+	inProgressSummaries[b.ID] = struct{}{}
+	inProgressSummariesMux.Unlock()
+
+	// Create user prompt
+	userPrompt := writeAIPrompt(bds, b.Title, b.Description)
+
+	// Retrieve AI response
+	resp, err := openaiClient.CreateChatCompletion(
 		context.Background(),
 		openai.ChatCompletionRequest{
-			Model: openaiModel,
+			Model:       openaiModel,
+			Temperature: 0.0,
 			Messages: []openai.ChatCompletionMessage{
 				{Role: openai.ChatMessageRoleSystem, Content: systemMessage},
 				{Role: openai.ChatMessageRoleUser, Content: userPrompt},
 			},
 		},
 	)
-
 	if err != nil {
-		return "", err
+		log.Println("Failed to generate AI summary:", err)
+		// Clear the in-progress marker so a later request can retry
+		inProgressSummariesMux.Lock()
+		delete(inProgressSummaries, b.ID)
+		inProgressSummariesMux.Unlock()
+		return
 	}
 
-	return resp.Choices[0].Message.Content, nil
+	db.Model(&Benchmark{}).Where("id = ?", b.ID).Update("AiSummary", resp.Choices[0].Message.Content)
+
+	// Generation finished - clear the in-progress marker
+	inProgressSummariesMux.Lock()
+	delete(inProgressSummaries, b.ID)
+	inProgressSummariesMux.Unlock()
 }
 
 func writeAIPrompt(bds []*BenchmarkData, bdTitle, bdDescription string) string {
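The in-progress guard in `generateSummary` above is what keeps its two call sites in benchmarks.go (`postBenchmarkCreate` and `getBenchmark`) from starting duplicate generations for the same benchmark. A minimal, standalone sketch of the same pattern, with hypothetical names and a dummy workload standing in for the OpenAI call:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	inProgress   = map[uint]struct{}{}
	inProgressMu sync.Mutex
)

// startJob runs work(id) in the background unless a job for the same id is
// already in flight; duplicate calls return immediately.
func startJob(id uint, work func(uint)) {
	inProgressMu.Lock()
	if _, ok := inProgress[id]; ok {
		inProgressMu.Unlock()
		return // a job for this id is already running
	}
	inProgress[id] = struct{}{}
	inProgressMu.Unlock()

	go func() {
		// Always clear the marker, even when work fails, so a later
		// request can retry.
		defer func() {
			inProgressMu.Lock()
			delete(inProgress, id)
			inProgressMu.Unlock()
		}()
		work(id)
	}()
}

func main() {
	for i := 0; i < 3; i++ {
		// Only the first call actually starts work for benchmark ID 1.
		startJob(1, func(id uint) {
			time.Sleep(100 * time.Millisecond)
			fmt.Println("summary generated for benchmark", id)
		})
	}
	time.Sleep(200 * time.Millisecond) // give the background job time to finish
}
```

Clearing the marker on the failure path as well (here via defer) is what allows a failed generation to be retried the next time the benchmark page is opened.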
diff --git a/server.go b/server.go
index c728cbb..700178b 100644
--- a/server.go
+++ b/server.go
@@ -15,6 +15,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/glebarez/sqlite"
 	"github.com/ravener/discord-oauth2"
+	openai "github.com/sashabaranov/go-openai"
 	"golang.org/x/oauth2"
 	"gorm.io/gorm"
 )
@@ -28,9 +29,22 @@ var (
 
 	// Benchmarks directory
 	benchmarksDir string
+
+	// OpenAI
+	openaiClient *openai.Client
+	openaiModel  string
 )
 
 func Start(c *Config, version string) {
+	// Setup OpenAI client //
+
+	if c.OpenAIApiKey != "" {
+		openaiClientConf := openai.DefaultConfig(c.OpenAIApiKey)
+		openaiClientConf.BaseURL = c.OpenAIURL
+		openaiClient = openai.NewClientWithConfig(openaiClientConf)
+		openaiModel = c.OpenAIModel
+	}
+
 	// Setup data dir //
 
 	_, err := os.Stat(c.DataDir)
diff --git a/static/js/benchmark.js b/static/js/benchmark.js
index 28be2fc..351ac13 100644
--- a/static/js/benchmark.js
+++ b/static/js/benchmark.js
@@ -1,22 +1,7 @@
-document.addEventListener('DOMContentLoaded', function () {
-    var converter = new showdown.Converter();
-    var aiSummaryHTMLElement = document.getElementById('aiSummaryMarkdown');
-
-    // Create an Intersection Observer
-    var observer = new IntersectionObserver(function (entries, observer) {
-        entries.forEach(function (entry) {
-            if (entry.isIntersecting) {
-                // Element is visible, render the Markdown
-                var htmlContents = converter.makeHtml(aiSummaryHTMLElement.innerText);
-                aiSummaryHTMLElement.innerHTML = htmlContents;
-                // Stop observing after rendering
-                observer.unobserve(aiSummaryHTMLElement);
-            }
-        });
-    }, { threshold: 1.0 });
-
-    // Start observing the element
-    observer.observe(aiSummaryHTMLElement);
+document.addEventListener('DOMContentLoaded', () => {
+    const converter = new showdown.Converter();
+    const element = document.getElementById('aiSummaryMarkdown');
+    element.innerHTML = converter.makeHtml(element.innerHTML);
 });
 
 // ===========================================================================================
diff --git a/templates/benchmark.tmpl b/templates/benchmark.tmpl
index fa25453..cc68e3d 100644
--- a/templates/benchmark.tmpl
+++ b/templates/benchmark.tmpl
@@ -87,8 +87,8 @@
-
-
+
+{{ .benchmark.AiSummary }}