Skip to content
Snippets Groups Projects
Verified Commit b4f38713 authored by Volker Schukai's avatar Volker Schukai :alien:
Browse files

fix: save without stats #63

parent 51d60bd0
Branches
Tags v1.18.1
No related merge requests found
......@@ -40,7 +40,7 @@ func TestWriteToDB4(t *testing.T) {
id := job.GetID()
job.mu.Lock()
job.stats = JobStats{
job.stats = &JobStats{
JobID: id,
RunCount: 20,
SuccessCount: 30,
......
......@@ -86,7 +86,7 @@ func TestCheckAndSaveOrUpdate(t *testing.T) {
assert.Equal(t, PriorityDefault, jobPersistence.Priority) // second update should not update the priority, because update only update stats and logs
// set stats and logs and test
job.stats = JobStats{
job.stats = &JobStats{
JobID: job.GetID(),
RunCount: 2,
SuccessCount: 3,
......
......@@ -135,7 +135,7 @@ func update(job *JobPersistence, db *gorm.DB) error {
return db.Transaction(func(tx *gorm.DB) error {
if job.Stats != (JobStats{}) {
if job.Stats != nil {
Info("Updating stats for job %s", job.ID)
if job.Stats.RunCount == 0 {
......@@ -192,7 +192,7 @@ func save(job *JobPersistence, db *gorm.DB) error {
}
}
if job.Stats != (JobStats{}) {
if job.Stats != nil {
job.Stats.JobID = job.ID
if err := tx.Model(job.Stats).
......
......@@ -54,7 +54,7 @@ type Job[T any] struct {
runner Runnable[T]
stats JobStats
stats *JobStats
logs []JobLog
}
......@@ -65,7 +65,7 @@ func NewJob[T any](id JobID, runner Runnable[T]) *Job[T] {
runner: runner,
priority: PriorityDefault,
logs: make([]JobLog, 0),
stats: JobStats{},
stats: &JobStats{},
}
}
......@@ -84,7 +84,7 @@ func (j *Job[T]) GetStats() JobStats {
defer j.mu.Unlock()
// workaround for gorm
j.stats.JobID = j.id
return j.stats
return *j.stats
}
// GetPersistence returns the persistence of the job
......@@ -165,7 +165,7 @@ func (j *Job[T]) Resume() {
func (j *Job[T]) ResetStats() {
j.mu.Lock()
defer j.mu.Unlock()
j.stats = JobStats{
j.stats = &JobStats{
JobID: j.id,
RunCount: 0,
SuccessCount: 0,
......@@ -216,6 +216,12 @@ func (j *Job[T]) Execute(ctx context.Context) (RunGenericResult, error) {
defer j.mu.Unlock()
// Update RunCount
if j.stats == nil {
j.stats = &JobStats{
JobID: j.id,
}
}
j.stats.RunCount++
// Update TimeMetrics
......
......@@ -34,8 +34,8 @@ type JobPersistence struct {
RetryDelayString *string `yaml:"retryDelay,omitempty" json:"retryDelay,omitempty" gorm:"-"`
PauseUntilString *string `yaml:"pauseUntil" json:"pauseUntil,omitempty" gorm:"-"`
Logs []JobLog `gorm:"foreignKey:JobID;references:ID" json:"-" yaml:"-"`
Stats JobStats `gorm:"foreignKey:JobID" json:"stats" yaml:"stats"`
Logs []JobLog `gorm:"foreignKey:JobID;references:ID" json:"-" yaml:"-"`
Stats *JobStats `gorm:"foreignKey:JobID" json:"stats" yaml:"stats"`
CreatedAt time.Time `gorm:"column:created_at" json:"createdAt" yaml:"createdAt"`
UpdatedAt time.Time `gorm:"column:updated_at" json:"updatedAt" yaml:"updatedAt"`
......@@ -155,7 +155,7 @@ func (jp JobPersistence) GetLogs() []JobLog {
}
func (jp JobPersistence) GetStats() JobStats {
return jp.Stats
return *jp.Stats
}
func (jp JobPersistence) GetID() JobID {
......
......@@ -126,6 +126,9 @@ func TestJobPersistence_MarshalUnmarshalJSON(t *testing.T) {
TimeoutString: &time5m0sString,
RetryDelayString: &time10sString,
PauseUntilString: &timeRefString,
Stats: &JobStats{
JobID: "",
},
},
expected: ` {"id":"","description":"","priority":0,"maxRetries":0,"runnable":{"type":""},"scheduler":{"type":""},"pause":false,"pauseReason":"","timeout":"5m0s","retryDelay":"10s","pauseUntil":"` + timeRefString + `","stats":{"jobId":"","runCount":0,"successCount":0,"errorCount":0,"timeMetrics":{"avg":0,"max":0,"min":0,"total":0},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"}`,
},
......@@ -139,7 +142,7 @@ func TestJobPersistence_MarshalUnmarshalJSON(t *testing.T) {
RetryDelayString: &time30sString,
PauseUntilString: &emptyString,
},
expected: `{"id":"","description":"","priority":0,"maxRetries":0,"runnable":{"type":""},"scheduler":{"type":""},"pause":false,"pauseReason":"","timeout":"1h0m0s","retryDelay":"30s","pauseUntil":"","stats":{"jobId":"","runCount":0,"successCount":0,"errorCount":0,"timeMetrics":{"avg":0,"max":0,"min":0,"total":0},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"}`,
expected: `{"id":"","description":"","priority":0,"maxRetries":0,"runnable":{"type":""},"scheduler":{"type":""},"pause":false,"pauseReason":"","timeout":"1h0m0s","retryDelay":"30s","pauseUntil":"","stats":null,"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"}`,
},
{
name: "Nil timeout and retryDelay",
......@@ -151,7 +154,7 @@ func TestJobPersistence_MarshalUnmarshalJSON(t *testing.T) {
RetryDelayString: &emptyString,
PauseUntilString: &emptyString,
},
expected: `{"id":"","description":"","priority":0,"maxRetries":0,"runnable":{"type":""},"scheduler":{"type":""},"pause":false,"pauseReason":"","timeout":"","retryDelay":"","pauseUntil":"","stats":{"jobId":"","runCount":0,"successCount":0,"errorCount":0,"timeMetrics":{"avg":0,"max":0,"min":0,"total":0},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"},"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"}`,
expected: `{"id":"","description":"","priority":0,"maxRetries":0,"runnable":{"type":""},"scheduler":{"type":""},"pause":false,"pauseReason":"","timeout":"","retryDelay":"","pauseUntil":"","stats":null,"createdAt":"0001-01-01T00:00:00Z","updatedAt":"0001-01-01T00:00:00Z"}`,
},
}
......@@ -159,12 +162,14 @@ func TestJobPersistence_MarshalUnmarshalJSON(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
// MarshalJSON testen
data, err := json.Marshal(tc.job)
//t.Log(string(data))
assert.NoError(t, err)
assert.JSONEq(t, tc.expected, string(data))
var job JobPersistence
err = json.Unmarshal(data, &job)
assert.NoError(t, err)
assert.Equal(t, tc.job.Timeout, job.Timeout)
assert.Equal(t, tc.job.RetryDelay, job.RetryDelay)
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment