Compare commits

...

4 Commits
main ... ai

Author SHA1 Message Date
Erikas
fe6acca652 update docker-compose.yaml file 2024-08-08 13:16:19 +03:00
Erikas
49a60bef8d update docker-compose.yml 2024-08-08 13:15:58 +03:00
Erikas
e8bb9f124f Add AI functionality 2024-08-08 13:14:21 +03:00
Erikas
4b586b3860 backup current work 2024-08-07 21:22:31 +03:00
10 changed files with 337 additions and 45 deletions

View File

@ -225,6 +225,10 @@ func postBenchmarkCreate(c *gin.Context) {
return return
} }
if openaiClient != nil {
go generateSummary(&benchmark, csvFiles)
}
// Redirect to the newly created benchmark using GET request // Redirect to the newly created benchmark using GET request
c.Redirect(http.StatusSeeOther, fmt.Sprintf("/benchmark/%d", benchmark.ID)) c.Redirect(http.StatusSeeOther, fmt.Sprintf("/benchmark/%d", benchmark.ID))
} }
@ -366,6 +370,11 @@ func getBenchmark(c *gin.Context) {
return return
} }
if benchmark.AiSummary == "" && openaiClient != nil {
go generateSummary(&benchmark, benchmarkDatas)
benchmark.AiSummary = "AI summary is being generated... Refresh the page later."
}
c.HTML(http.StatusOK, "benchmark.tmpl", gin.H{ c.HTML(http.StatusOK, "benchmark.tmpl", gin.H{
"activePage": "benchmark", "activePage": "benchmark",
"username": session.Get("Username"), "username": session.Get("Username"),

View File

@ -8,41 +8,40 @@ import (
) )
type Config struct { type Config struct {
Bind string Bind string
DataDir string DataDir string
SessionSecret string
DiscordClientID string DiscordClientID string
DiscordClientSecret string DiscordClientSecret string
DiscordRedirectURL string DiscordRedirectURL string
SessionSecret string
OpenAIURL string
OpenAIApiKey string
OpenAIModel string
Version bool Version bool
} }
func NewConfig() (*Config, error) { func NewConfig() (*Config, error) {
// Define the flags config := &Config{}
bind := flag.String("bind", "0.0.0.0:8080", "Bind address and port")
dataDir := flag.String("data-dir", "/data", "Path where data would be stored") flag.StringVar(&config.Bind, "bind", "0.0.0.0:8080", "Bind address and port")
discordClientID := flag.String("discord-client-id", "", "Discord OAuth2 client ID (see https://discord.com/developers/applications)") flag.StringVar(&config.DataDir, "data-dir", "/data", "Path where data would be stored")
discordClientSecret := flag.String("discord-client-secret", "", "Discord OAuth2 client secret (see https://discord.com/developers/applications)") flag.StringVar(&config.SessionSecret, "session-secret", "", "Session secret")
discordRedirectURL := flag.String("discord-redirect-url", "", "Discord OAuth2 redirect URL (<scheme>://<domain>/login/callback)")
sessionSecret := flag.String("session-secret", "", "Session secret") flag.StringVar(&config.DiscordClientID, "discord-client-id", "", "Discord OAuth2 client ID (see https://discord.com/developers/applications)")
flagVersion := flag.Bool("version", false, "prints version of the application") flag.StringVar(&config.DiscordClientSecret, "discord-client-secret", "", "Discord OAuth2 client secret (see https://discord.com/developers/applications)")
flag.StringVar(&config.DiscordRedirectURL, "discord-redirect-url", "", "Discord OAuth2 redirect URL (<scheme>://<domain>/login/callback)")
flag.StringVar(&config.OpenAIURL, "openai-url", "https://api.openai.com/v1", "OpenAI API URL")
flag.StringVar(&config.OpenAIModel, "openai-model", "gpt-4o", "OpenAI model ID")
flag.StringVar(&config.OpenAIApiKey, "openai-api-key", "", "OpenAI API Key (leave empty to disable OpenAI integration)")
flag.BoolVar(&config.Version, "version", false, "prints version of the application")
envflag.Parse(envflag.WithPrefix("FS_")) envflag.Parse(envflag.WithPrefix("FS_"))
// Assign the parsed flag values to the Config struct
config := &Config{
Bind: *bind,
DataDir: *dataDir,
DiscordClientID: *discordClientID,
DiscordClientSecret: *discordClientSecret,
DiscordRedirectURL: *discordRedirectURL,
SessionSecret: *sessionSecret,
Version: *flagVersion,
}
if config.Version { if config.Version {
return config, nil return config, nil
} }
@ -62,6 +61,14 @@ func NewConfig() (*Config, error) {
if config.SessionSecret == "" { if config.SessionSecret == "" {
return nil, errors.New("missing session-secret argument") return nil, errors.New("missing session-secret argument")
} }
if config.OpenAIApiKey != "" {
if config.OpenAIModel == "" {
return nil, errors.New("missing openai-model argument")
}
if config.OpenAIURL == "" {
return nil, errors.New("missing openai-url argument")
}
}
return config, nil return config, nil
} }

View File

@ -10,8 +10,11 @@ services:
environment: environment:
- FS_BIND=0.0.0.0:8080 - FS_BIND=0.0.0.0:8080
- FS_DATA_DIR=/data - FS_DATA_DIR=/data
- FS_SESSION_SECRET=xxxxxxxxxxxxxxxxxx
- FS_DISCORD_CLIENT_ID=xxxxxxxxxxxxxxxxxx - FS_DISCORD_CLIENT_ID=xxxxxxxxxxxxxxxxxx
- FS_DISCORD_CLIENT_SECRET=xxxxxxxxxxxxxxxxxx - FS_DISCORD_CLIENT_SECRET=xxxxxxxxxxxxxxxxxx
- FS_DISCORD_REDIRECT_URL=<scheme>://<domain>/login/callback - FS_DISCORD_REDIRECT_URL=<scheme>://<domain>/login/callback
- FS_SESSION_SECRET=xxxxxxxxxxxxxxxxxx - FS_OPENAI_API_KEY=xxxxxxxxxxxxxxxxxx
# - FS_OPENAI_MODEL=gpt-4o
# - FS_OPENAI_URL=https://api.openai.com/v1
restart: unless-stopped restart: unless-stopped

1
go.mod
View File

@ -40,6 +40,7 @@ require (
github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/ravener/discord-oauth2 v0.0.0-20230514095040-ae65713199b3 github.com/ravener/discord-oauth2 v0.0.0-20230514095040-ae65713199b3
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/sashabaranov/go-openai v1.28.0
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect github.com/ugorji/go/codec v1.2.12 // indirect
golang.org/x/arch v0.8.0 // indirect golang.org/x/arch v0.8.0 // indirect

2
go.sum
View File

@ -171,6 +171,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/sashabaranov/go-openai v1.28.0 h1:WS9F9BriSvtHvknPQy2Oi3b+8zkmJdEXcycrWqrSicQ=
github.com/sashabaranov/go-openai v1.28.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=

View File

@ -18,6 +18,7 @@ type Benchmark struct {
UserID uint UserID uint
Title string Title string
Description string Description string
AiSummary string
CreatedAtHumanized string `gorm:"-"` // Human readable "X h/m/s ago" version of CreatedAt (filled automatically) CreatedAtHumanized string `gorm:"-"` // Human readable "X h/m/s ago" version of CreatedAt (filled automatically)

270
openai.go Normal file
View File

@ -0,0 +1,270 @@
package flightlesssomething
import (
"context"
"log"
"math"
"sort"
"strconv"
"strings"
"sync"
openai "github.com/sashabaranov/go-openai"
)
// systemMessage is the system prompt sent with every AI-summary request.
// It constrains the model to a fixed report structure ("Top runs", optional
// "Issues", "Summary"), forbids raw numbers in favor of relative percentages,
// and requires every claim to be backed by the provided benchmark data.
const systemMessage = `
You are given PC benchmark data of several runs. All this data is visible in the website in a form of charts and your goal is to provide insights.
You MUST:
1. Write at max 3 sections (headers) - "Top runs", "Issues" (optional) and "Summary".
2. In Issues section, Figure out if any of the run is significantly worse then others in the same benchmark. You MUST use ONLY the data provided to explain the difference, and your points must be based only on the data provided. If there are no issues - do not write this section. Do not make any guesses. Additional requirements: (a) validate if the same hardware/software was used (by using provided text fields, NOT the data), (b) do not speculate, but use numbers to back up your claims, (c) only write if it's an actual issue with FPS (everything else is just additional information).
3. In Top runs section, provide which run has the (average) "Highest FPS", which has the "Smoothest FPS" (LOWEST std.dev. and variance of FPS value - LOWEST, NOT HIGHEST) and which is the best "Best overall" (preferrably lower std.dev./variance than higher FPS, but if slight decrease in stability gives significantly higher FPS - pick that one). NEVER consider runs that have significantly lower FPS or has other significant issues. Exclude runs from consideration if they are significantly worse than the rest (as it would be mentioned in issues section). Note that your goal is to pick winners and not do a comparison in this section. Include numbers to justify your claims.
4. In Summary section, provide an overview of all runs. Mention which runs are similar and which are different. Mention which runs are better in terms of FPS and which are better in terms of stability. Mention if there are any issues and what could be the reason for them. In short - summarize whole benchmark.
5. First 2 sections should be bullet points, no subpoints, only 1 bullet point per point, while summary should be a single paragraph.
6. NEVER use actual numbers. Instead, use percentage in comparison to other runs.
7. Use markdown, use code syntax for labels.
`
var (
	// inProgressSummaries tracks benchmark IDs whose AI summary is currently
	// being generated, so concurrent requests for the same benchmark are
	// deduplicated instead of issuing multiple OpenAI calls.
	inProgressSummaries = map[uint]struct{}{}
	// inProgressSummariesMux guards all access to inProgressSummaries.
	inProgressSummariesMux = &sync.Mutex{}
)
// generateSummary asks the configured OpenAI endpoint to summarize the given
// benchmark's runs and stores the result in the benchmark's AiSummary column.
// It is intended to be launched in its own goroutine; concurrent calls for
// the same benchmark ID are deduplicated via inProgressSummaries. It is a
// best-effort operation: failures are logged, never surfaced to the caller.
func generateSummary(b *Benchmark, bds []*BenchmarkData) {
	// OpenAI integration disabled — nothing to do.
	if openaiClient == nil {
		return
	}

	// Register this benchmark as in-progress, bailing out if another
	// goroutine is already generating a summary for it.
	inProgressSummariesMux.Lock()
	if _, ok := inProgressSummaries[b.ID]; ok {
		inProgressSummariesMux.Unlock()
		return
	}
	inProgressSummaries[b.ID] = struct{}{}
	inProgressSummariesMux.Unlock()

	// Always clear the in-progress marker, including on error paths, so a
	// failed generation can be retried later. (Previously the marker was
	// only removed after a successful API call, so one failed request
	// permanently blocked summary generation for that benchmark.)
	defer func() {
		inProgressSummariesMux.Lock()
		delete(inProgressSummaries, b.ID)
		inProgressSummariesMux.Unlock()
	}()

	// Build the user prompt from the benchmark metadata and per-run stats.
	userPrompt := writeAIPrompt(bds, b.Title, b.Description)

	// Retrieve AI response. Temperature 0 keeps the output deterministic.
	resp, err := openaiClient.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model:       openaiModel,
			Temperature: 0.0,
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleSystem, Content: systemMessage},
				{Role: openai.ChatMessageRoleUser, Content: userPrompt},
			},
		},
	)
	if err != nil {
		log.Println("Failed to generate AI summary:", err)
		return
	}
	if len(resp.Choices) == 0 {
		// Defensive: indexing Choices[0] below would panic on an empty list.
		log.Println("OpenAI returned no choices for benchmark", b.ID)
		return
	}

	// Persist the summary; log (rather than silently drop) DB failures.
	if err := db.Model(&Benchmark{}).Where("id = ?", b.ID).Update("AiSummary", resp.Choices[0].Message.Content).Error; err != nil {
		log.Println("Failed to store AI summary:", err)
	}
}
// writeAIPrompt renders the user prompt for the OpenAI chat request: the
// benchmark title/description followed by, for each run, its label, textual
// hardware/software specs, and aggregate statistics for every recorded
// metric series. The output format is consumed only by the LLM, but it is
// kept stable so prompts remain reproducible.
func writeAIPrompt(bds []*BenchmarkData, bdTitle, bdDescription string) string {
	sb := strings.Builder{}

	sb.WriteString("Benchmark title: ")
	sb.WriteString(bdTitle)
	sb.WriteString("\n")
	sb.WriteString("Benchmark description: \n")
	sb.WriteString(bdDescription)
	sb.WriteString("\n\n")
	sb.WriteString("Benchmark contains ")
	sb.WriteString(strconv.Itoa(len(bds)))
	sb.WriteString(" runs:\n")

	for _, benchmarkRun := range bds {
		sb.WriteString("\nLabel: ")
		sb.WriteString(benchmarkRun.Label)
		sb.WriteString("\n")

		// Hardware/software context — plain text fields, written as-is.
		for _, spec := range []struct{ name, value string }{
			{"OS", benchmarkRun.SpecOS},
			{"GPU", benchmarkRun.SpecGPU},
			{"CPU", benchmarkRun.SpecCPU},
			{"RAM", benchmarkRun.SpecRAM},
			{"Linux kernel", benchmarkRun.SpecLinuxKernel},
			{"Linux scheduler", benchmarkRun.SpecLinuxScheduler},
		} {
			sb.WriteString(spec.name)
			sb.WriteString(": ")
			sb.WriteString(spec.value)
			sb.WriteString("\n")
		}

		// Numeric series — summarized as aggregate statistics so the prompt
		// stays small regardless of how many samples each run contains.
		for _, metric := range []struct {
			name string
			data []float64
		}{
			{"FPS", benchmarkRun.DataFPS},
			{"Frame time", benchmarkRun.DataFrameTime},
			{"CPU load", benchmarkRun.DataCPULoad},
			{"GPU load", benchmarkRun.DataGPULoad},
			{"CPU temp", benchmarkRun.DataCPUTemp},
			{"GPU temp", benchmarkRun.DataGPUTemp},
			{"GPU core clock", benchmarkRun.DataGPUCoreClock},
			{"GPU mem clock", benchmarkRun.DataGPUMemClock},
			{"GPU VRAM used", benchmarkRun.DataGPUVRAMUsed},
			{"GPU power", benchmarkRun.DataGPUPower},
			{"RAM used", benchmarkRun.DataRAMUsed},
			{"Swap used", benchmarkRun.DataSwapUsed},
		} {
			sb.WriteString(metric.name)
			sb.WriteString(": ")
			sb.WriteString(calculateAIPromptArrayStats(metric.data).String())
			sb.WriteString("\n")
		}
	}

	return sb.String()
}
// AIPromptArrayStats holds aggregate statistics of one metric series
// (FPS, temperatures, clocks, ...) for inclusion in the AI prompt.
type AIPromptArrayStats struct {
	Count        int     // number of samples
	Lowest       float64 // minimum value
	Low1Percent  float64 // 1st percentile (low)
	Mean         float64
	Median       float64
	Top97Percent float64 // 97th percentile
	Highest      float64 // maximum value
	StdDev       float64
	Variance     float64 // population variance (divides by N)
}

// calculateAIPromptArrayStats computes summary statistics for a metric
// series. The input slice is NOT modified: it is copied before sorting,
// because the caller's slices back the chart data rendered on the site.
// An empty input yields the zero-value struct.
func calculateAIPromptArrayStats(data []float64) AIPromptArrayStats {
	if len(data) == 0 {
		return AIPromptArrayStats{}
	}

	// Sort a copy — the original in-place sort.Float64s(data) silently
	// reordered the caller's series, corrupting the charts.
	sorted := make([]float64, len(data))
	copy(sorted, data)
	sort.Float64s(sorted)

	count := len(sorted)
	lowest := sorted[0]
	highest := sorted[count-1]

	// Percentile indices via ceil(p*N)-1, clamped to 0 for tiny inputs.
	low1PercentIndex := int(math.Ceil(0.01*float64(count))) - 1
	if low1PercentIndex < 0 {
		low1PercentIndex = 0
	}
	low1Percent := sorted[low1PercentIndex]
	top97PercentIndex := int(math.Ceil(0.97*float64(count))) - 1
	if top97PercentIndex < 0 {
		top97PercentIndex = 0
	}
	top97Percent := sorted[top97PercentIndex]

	mean := 0.0
	for _, value := range sorted {
		mean += value
	}
	mean /= float64(count)

	// Median: average of the two middle samples for even counts.
	median := 0.0
	if count%2 == 0 {
		median = (sorted[count/2-1] + sorted[count/2]) / 2
	} else {
		median = sorted[count/2]
	}

	variance := 0.0
	for _, value := range sorted {
		variance += (value - mean) * (value - mean)
	}
	variance /= float64(count)
	stdDev := math.Sqrt(variance)

	return AIPromptArrayStats{
		Count:        count,
		Lowest:       lowest,
		Low1Percent:  low1Percent,
		Mean:         mean,
		Median:       median,
		Top97Percent: top97Percent,
		Highest:      highest,
		StdDev:       stdDev,
		Variance:     variance,
	}
}

// String renders the statistics as a single comma-separated line for the AI
// prompt. Floats use the shortest exact representation (FormatFloat -1).
func (as AIPromptArrayStats) String() string {
	return strings.Join([]string{
		"Count: " + strconv.Itoa(as.Count),
		"Lowest: " + strconv.FormatFloat(as.Lowest, 'f', -1, 64),
		"Low1Percent: " + strconv.FormatFloat(as.Low1Percent, 'f', -1, 64),
		"Mean: " + strconv.FormatFloat(as.Mean, 'f', -1, 64),
		"Median: " + strconv.FormatFloat(as.Median, 'f', -1, 64),
		"Top97Percent: " + strconv.FormatFloat(as.Top97Percent, 'f', -1, 64),
		"Highest: " + strconv.FormatFloat(as.Highest, 'f', -1, 64),
		"StdDev: " + strconv.FormatFloat(as.StdDev, 'f', -1, 64),
		"Variance: " + strconv.FormatFloat(as.Variance, 'f', -1, 64),
	}, ", ")
}

View File

@ -15,6 +15,7 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/glebarez/sqlite" "github.com/glebarez/sqlite"
"github.com/ravener/discord-oauth2" "github.com/ravener/discord-oauth2"
openai "github.com/sashabaranov/go-openai"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"gorm.io/gorm" "gorm.io/gorm"
) )
@ -28,9 +29,22 @@ var (
// Benchmarks directory // Benchmarks directory
benchmarksDir string benchmarksDir string
// OpenAI
openaiClient *openai.Client
openaiModel string
) )
func Start(c *Config, version string) { func Start(c *Config, version string) {
// Setup OpenAI client //
if c.OpenAIApiKey != "" {
openaiClientConf := openai.DefaultConfig(c.OpenAIApiKey)
openaiClientConf.BaseURL = c.OpenAIURL
openaiClient = openai.NewClientWithConfig(openaiClientConf)
openaiModel = c.OpenAIModel
}
// Setup data dir // // Setup data dir //
_, err := os.Stat(c.DataDir) _, err := os.Stat(c.DataDir)

View File

@ -1,22 +1,7 @@
document.addEventListener('DOMContentLoaded', function () { document.addEventListener('DOMContentLoaded', () => {
var converter = new showdown.Converter(); const converter = new showdown.Converter();
var aiSummaryHTMLElement = document.getElementById('aiSummaryMarkdown'); const element = document.getElementById('aiSummaryMarkdown');
element.innerHTML = converter.makeHtml(element.innerHTML);
// Create an Intersection Observer
var observer = new IntersectionObserver(function (entries, observer) {
entries.forEach(function (entry) {
if (entry.isIntersecting) {
// Element is visible, render the Markdown
var htmlContents = converter.makeHtml(aiSummaryHTMLElement.innerText);
aiSummaryHTMLElement.innerHTML = htmlContents;
// Stop observing after rendering
observer.unobserve(aiSummaryHTMLElement);
}
});
}, { threshold: 1.0 });
// Start observing the element
observer.observe(aiSummaryHTMLElement);
}); });
// =========================================================================================== // ===========================================================================================

View File

@ -87,8 +87,8 @@
<!-- Summary tab --> <!-- Summary tab -->
<div class="tab-pane" id="summary" role="tabpanel" aria-labelledby="summary-tab"> <div class="tab-pane" id="summary" role="tabpanel" aria-labelledby="summary-tab">
<div class="row"> <div class="row">
<div class="col-md-12 text-center" id="aiSummaryMarkdown"> <div class="col-md-12" id="aiSummaryMarkdown">
<!-- TODO: markdown placeholder for AI summary --> {{ .benchmark.AiSummary }}
</div> </div>
</div> </div>