backup current work

commit 4b586b3860
parent 00775d3514
@@ -366,6 +366,8 @@ func getBenchmark(c *gin.Context) {
 		return
 	}
 
+	getAISummary(benchmarkDatas, benchmark.Title, benchmark.Description, "", "")
+
 	c.HTML(http.StatusOK, "benchmark.tmpl", gin.H{
 		"activePage": "benchmark",
 		"username":   session.Get("Username"),
@@ -15,6 +15,8 @@ type Config struct {
 	DiscordClientSecret string
 	DiscordRedirectURL  string
 	SessionSecret       string
+	OpenAIApiKey        string
+	OpenAIModel         string
 
 	Version bool
 }
@@ -27,6 +29,8 @@ func NewConfig() (*Config, error) {
 	discordClientSecret := flag.String("discord-client-secret", "", "Discord OAuth2 client secret (see https://discord.com/developers/applications)")
 	discordRedirectURL := flag.String("discord-redirect-url", "", "Discord OAuth2 redirect URL (<scheme>://<domain>/login/callback)")
 	sessionSecret := flag.String("session-secret", "", "Session secret")
+	openaiApiKey := flag.String("openai-api-key", "", "OpenAI API Key (optional, leave empty to disable OpenAI integration)")
+	openaiModel := flag.String("openai-model", "", "OpenAI model ID (optional, leave empty to use the default model)")
 	flagVersion := flag.Bool("version", false, "prints version of the application")
 
 	envflag.Parse(envflag.WithPrefix("FS_"))
@@ -39,6 +43,8 @@ func NewConfig() (*Config, error) {
 		DiscordClientSecret: *discordClientSecret,
 		DiscordRedirectURL:  *discordRedirectURL,
 		SessionSecret:       *sessionSecret,
+		OpenAIApiKey:        *openaiApiKey,
+		OpenAIModel:         *openaiModel,
 
 		Version: *flagVersion,
 	}
@@ -62,6 +68,9 @@ func NewConfig() (*Config, error) {
 	if config.SessionSecret == "" {
 		return nil, errors.New("missing session-secret argument")
 	}
+	if (config.OpenAIApiKey == "" && config.OpenAIModel != "") || (config.OpenAIApiKey != "" && config.OpenAIModel == "") {
+		return nil, errors.New("openai-api-key and openai-model must be both empty or both non-empty")
+	}
 
 	return config, nil
 }
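With these options wired in, the integration is enabled by supplying both values together. A usage sketch — the binary name, key, and model below are placeholders, and the FS_-prefixed variable names assume envflag's usual mapping of flag names to prefixed, upper-cased environment variables:

    ./flightlesssomething -openai-api-key "sk-placeholder" -openai-model "gpt-4o" ...
    FS_OPENAI_API_KEY="sk-placeholder" FS_OPENAI_MODEL="gpt-4o" ./flightlesssomething ...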
go.mod (+1)
@@ -40,6 +40,7 @@ require (
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/ravener/discord-oauth2 v0.0.0-20230514095040-ae65713199b3
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	github.com/sashabaranov/go-openai v1.28.0
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/ugorji/go/codec v1.2.12 // indirect
 	golang.org/x/arch v0.8.0 // indirect
go.sum (+2)
@@ -171,6 +171,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
 github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/sashabaranov/go-openai v1.28.0 h1:WS9F9BriSvtHvknPQy2Oi3b+8zkmJdEXcycrWqrSicQ=
+github.com/sashabaranov/go-openai v1.28.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
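For reference, the go.mod and go.sum entries above are what the Go toolchain records after pulling in the new dependency, e.g.:

    go get github.com/sashabaranov/go-openai@v1.28.0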
openai.go (new file, +249)
@@ -0,0 +1,249 @@
+package flightlesssomething
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+
+	openai "github.com/sashabaranov/go-openai"
+)
+
+const systemMessage = `
+You are given a summary of PC benchmark data. Your task is to provide conclusion and overview of the given data:
+
+0. Your summary must consist of max 3 segments - "Highest and Smoothest FPS", "Anomalies" and "Summary".
+1. Provide which run has the highest (average) fps and which has the smoothest fps (based on fps/frametime std.dev. and variance). Do not hesitate to mention multiple runs if they are incredibly similar. Also provide overall the best run. Try to understand which one has the best sweet "average" in terms of being smoothest and highest FPS.
+2. Anomalies in the data (if any). For example, if all benchmarks uses the same hardware/software? Or of certain run has lower/higher FPS that correlates to higher/lower VRAM usage, core clock, mem clock, etc. Try to figure out why is it so, by looking ONLY at the provided data. Do NOT mention anything if it's not an anomaly.
+3. If certain run had much worse FPS/Frametime than others, then exclude it from consideration in point 1. In point 2, try to figure out why it is so (first consider GPU VRAM, core clock, mem clock, then RAM/SWAP and other factors, while lastly CPU and GPU usage). If you can't figure out why, then just say so.
+4. Point 3 must be your TOP priority. Do NOT provide any other information than requested.
+5. You can mention labels in a natural way. E.g. you can call "lavd-defaults" just "LAVD" (if this makes sense).
+6. Use bullet points for point 1 and 2. Use paragraph for point 3.
+7. NEVER provide actual number or "higher/lower than". Instead, ALWAYS provide exact/approximate percentage in comparison to others.
+8. NEVER guess the issue outside of the provided data. If you can't figure out why, then just say so.
+9. ALWAYS mention in "anomalies" if certain run has correlation of higher/lower FPS with certain metrics (e.g. VRAM usage, core clock, mem clock, ram, swap, cpu, gpu). Only mention if there is significant correlation, at least 5 percent.
+10. Provide an extended summary overview of all runs, but avoid repeating yourself of what you mentioned in point 1 and 2.
+
+Do not provide numbers or visualize anything - user can already see charts.
+`
+
+func getAISummary(bds []*BenchmarkData, bdTitle, bdDescription, openaiApiKey, openaiModel string) (string, error) {
+	userPrompt := writeAIPrompt(bds, bdTitle, bdDescription)
+	fmt.Println(userPrompt)
+
+	return "", nil
+
+	client := openai.NewClient(openaiApiKey)
+	resp, err := client.CreateChatCompletion(
+		context.Background(),
+		openai.ChatCompletionRequest{
+			Model: openaiModel,
+			Messages: []openai.ChatCompletionMessage{
+				{Role: openai.ChatMessageRoleSystem, Content: systemMessage},
+				{Role: openai.ChatMessageRoleUser, Content: userPrompt},
+			},
+		},
+	)
+
+	if err != nil {
+		return "", err
+	}
+
+	return resp.Choices[0].Message.Content, nil
+}
+
+func writeAIPrompt(bds []*BenchmarkData, bdTitle, bdDescription string) string {
+	sb := strings.Builder{}
+	sb.WriteString("Benchmark title: ")
+	sb.WriteString(bdTitle)
+	sb.WriteString("\n")
+	sb.WriteString("Benchmark description: \n")
+	sb.WriteString(bdDescription)
+	sb.WriteString("\n\n")
+	sb.WriteString("Benchmark contains ")
+	sb.WriteString(strconv.Itoa(len(bds)))
+	sb.WriteString(" runs:\n")
+
+	for _, benchmarkRun := range bds {
+		sb.WriteString("\nLabel: ")
+		sb.WriteString(benchmarkRun.Label)
+		sb.WriteString("\n")
+
+		sb.WriteString("OS: ")
+		sb.WriteString(benchmarkRun.SpecOS)
+		sb.WriteString("\n")
+		sb.WriteString("GPU: ")
+		sb.WriteString(benchmarkRun.SpecGPU)
+		sb.WriteString("\n")
+		sb.WriteString("CPU: ")
+		sb.WriteString(benchmarkRun.SpecCPU)
+		sb.WriteString("\n")
+		sb.WriteString("RAM: ")
+		sb.WriteString(benchmarkRun.SpecRAM)
+		sb.WriteString("\n")
+		sb.WriteString("Linux kernel: ")
+		sb.WriteString(benchmarkRun.SpecLinuxKernel)
+		sb.WriteString("\n")
+		sb.WriteString("Linux scheduler: ")
+		sb.WriteString(benchmarkRun.SpecLinuxScheduler)
+		sb.WriteString("\n")
+
+		// FPS
+		stats := calculateAIPromptArrayStats(benchmarkRun.DataFPS)
+		sb.WriteString("FPS: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// Frame time
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataFrameTime)
+		sb.WriteString("Frame time: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// CPU load
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataCPULoad)
+		sb.WriteString("CPU load: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU load
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPULoad)
+		sb.WriteString("GPU load: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// CPU temp
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataCPUTemp)
+		sb.WriteString("CPU temp: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU temp
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPUTemp)
+		sb.WriteString("GPU temp: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU core clock
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPUCoreClock)
+		sb.WriteString("GPU core clock: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU mem clock
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPUMemClock)
+		sb.WriteString("GPU mem clock: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU VRAM used
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPUVRAMUsed)
+		sb.WriteString("GPU VRAM used: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// GPU power
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataGPUPower)
+		sb.WriteString("GPU power: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// RAM used
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataRAMUsed)
+		sb.WriteString("RAM used: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+
+		// Swap used
+		stats = calculateAIPromptArrayStats(benchmarkRun.DataSwapUsed)
+		sb.WriteString("Swap used: ")
+		sb.WriteString(stats.String())
+		sb.WriteString("\n")
+	}
+
+	return sb.String()
+}
+
+type AIPromptArrayStats struct {
+	Count        int
+	Lowest       float64
+	Low1Percent  float64
+	Mean         float64
+	Median       float64
+	Top97Percent float64
+	Highest      float64
+	StdDev       float64
+	Variance     float64
+}
+
+func calculateAIPromptArrayStats(data []float64) AIPromptArrayStats {
+	if len(data) == 0 {
+		return AIPromptArrayStats{}
+	}
+
+	sort.Float64s(data)
+	count := len(data)
+	lowest := data[0]
+	highest := data[count-1]
+
+	low1PercentIndex := int(math.Ceil(0.01*float64(count))) - 1
+	if low1PercentIndex < 0 {
+		low1PercentIndex = 0
+	}
+	low1Percent := data[low1PercentIndex]
+
+	top97PercentIndex := int(math.Ceil(0.97*float64(count))) - 1
+	if top97PercentIndex < 0 {
+		top97PercentIndex = 0
+	}
+	top97Percent := data[top97PercentIndex]
+
+	mean := 0.0
+	for _, value := range data {
+		mean += value
+	}
+	mean /= float64(count)
+
+	median := 0.0
+	if count%2 == 0 {
+		median = (data[count/2-1] + data[count/2]) / 2
+	} else {
+		median = data[count/2]
+	}
+
+	variance := 0.0
+	for _, value := range data {
+		variance += (value - mean) * (value - mean)
+	}
+	variance /= float64(count)
+
+	stdDev := math.Sqrt(variance)
+
+	return AIPromptArrayStats{
+		Count:        count,
+		Lowest:       lowest,
+		Low1Percent:  low1Percent,
+		Mean:         mean,
+		Median:       median,
+		Top97Percent: top97Percent,
+		Highest:      highest,
+		StdDev:       stdDev,
+		Variance:     variance,
+	}
+}
+
+func (as AIPromptArrayStats) String() string {
+	return strings.Join([]string{
+		"Count: " + strconv.Itoa(as.Count),
+		"Lowest: " + strconv.FormatFloat(as.Lowest, 'f', -1, 64),
+		"Low1Percent: " + strconv.FormatFloat(as.Low1Percent, 'f', -1, 64),
+		"Mean: " + strconv.FormatFloat(as.Mean, 'f', -1, 64),
+		"Median: " + strconv.FormatFloat(as.Median, 'f', -1, 64),
+		"Top97Percent: " + strconv.FormatFloat(as.Top97Percent, 'f', -1, 64),
+		"Highest: " + strconv.FormatFloat(as.Highest, 'f', -1, 64),
+		"StdDev: " + strconv.FormatFloat(as.StdDev, 'f', -1, 64),
+		"Variance: " + strconv.FormatFloat(as.Variance, 'f', -1, 64),
+	}, ", ")
+}
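For a quick sanity check of the percentile and variance arithmetic in calculateAIPromptArrayStats, a small test along these lines could sit next to openai.go (the file, test name, and input values are illustrative, not part of this commit):

package flightlesssomething

import "testing"

func TestCalculateAIPromptArrayStatsExample(t *testing.T) {
	stats := calculateAIPromptArrayStats([]float64{40, 50, 60, 70, 80})
	// mean = 300/5 = 60, median = 60, variance = 1000/5 = 200, stddev = sqrt(200) ≈ 14.14
	if stats.Mean != 60 || stats.Median != 60 || stats.Variance != 200 {
		t.Fatalf("unexpected stats: %s", stats.String())
	}
	// ceil(0.01*5)-1 = 0 and ceil(0.97*5)-1 = 4, so for five samples the
	// "1% low" collapses to the minimum and the "97th percentile" to the maximum.
	if stats.Lowest != 40 || stats.Low1Percent != 40 || stats.Highest != 80 || stats.Top97Percent != 80 {
		t.Fatalf("unexpected percentile values: %s", stats.String())
	}
}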