TP-5555 | clean up job
This commit is contained in:
@@ -86,3 +86,6 @@ mjolnir:
|
||||
houston:
|
||||
service.url: HOUSTON_SERVICE_URL
|
||||
realm.id: HOUSTON_REALM_ID
|
||||
|
||||
jobScheduler:
|
||||
retentionInDays: 7
|
||||
|
||||
@@ -9,20 +9,21 @@ import (
|
||||
)
|
||||
|
||||
type AppConfig struct {
|
||||
name string
|
||||
env string
|
||||
port int
|
||||
metricsPort int
|
||||
prometheus *Prometheus
|
||||
postgres Postgres
|
||||
timezone string
|
||||
httpConfig *HttpConfig
|
||||
clientConfigs *ClientConfigs
|
||||
awsConfig *AwsConfig
|
||||
KafkaConfig *KafkaConfig
|
||||
ElasticConfig *ElasticConfig
|
||||
mjolnir *MjolnirClientConfig
|
||||
houston *HoustonClientConfig
|
||||
name string
|
||||
env string
|
||||
port int
|
||||
metricsPort int
|
||||
prometheus *Prometheus
|
||||
postgres Postgres
|
||||
timezone string
|
||||
httpConfig *HttpConfig
|
||||
clientConfigs *ClientConfigs
|
||||
awsConfig *AwsConfig
|
||||
KafkaConfig *KafkaConfig
|
||||
ElasticConfig *ElasticConfig
|
||||
mjolnir *MjolnirClientConfig
|
||||
houston *HoustonClientConfig
|
||||
JobSchedulerConfig *JobSchedulerConfig
|
||||
}
|
||||
|
||||
type MigConfig struct {
|
||||
@@ -37,20 +38,21 @@ func LoadConfig() {
|
||||
readConfig()
|
||||
|
||||
appConfig = AppConfig{
|
||||
name: getString("name", true),
|
||||
env: getString("env", true),
|
||||
port: getInt("port", true),
|
||||
metricsPort: getInt("metrics.port", true),
|
||||
prometheus: GetPrometheusConfig(),
|
||||
postgres: getPostgresConfig(),
|
||||
timezone: getString("timezone", true),
|
||||
clientConfigs: loadClientConfigs(),
|
||||
httpConfig: NewHttpConfig(),
|
||||
awsConfig: NewAWSConfig(),
|
||||
KafkaConfig: NewKafkaConfig(),
|
||||
ElasticConfig: NewElasticConfig(),
|
||||
mjolnir: NewMjolnirConfig(),
|
||||
houston: NewHoustonConfig(),
|
||||
name: getString("name", true),
|
||||
env: getString("env", true),
|
||||
port: getInt("port", true),
|
||||
metricsPort: getInt("metrics.port", true),
|
||||
prometheus: GetPrometheusConfig(),
|
||||
postgres: getPostgresConfig(),
|
||||
timezone: getString("timezone", true),
|
||||
clientConfigs: loadClientConfigs(),
|
||||
httpConfig: NewHttpConfig(),
|
||||
awsConfig: NewAWSConfig(),
|
||||
KafkaConfig: NewKafkaConfig(),
|
||||
ElasticConfig: NewElasticConfig(),
|
||||
mjolnir: NewMjolnirConfig(),
|
||||
houston: NewHoustonConfig(),
|
||||
JobSchedulerConfig: NewJobSchedulerConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -124,3 +126,7 @@ func GetMjolnirConfig() *MjolnirClientConfig {
|
||||
func GetHoustonConfig() *HoustonClientConfig {
|
||||
return appConfig.houston
|
||||
}
|
||||
|
||||
func GetJobSchedulerConfig() *JobSchedulerConfig {
|
||||
return appConfig.JobSchedulerConfig
|
||||
}
|
||||
|
||||
15
configs/jobScheduler.go
Normal file
15
configs/jobScheduler.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package configs
|
||||
|
||||
// JobSchedulerConfig holds the configuration for the background job
// scheduler (read from the "jobScheduler" section of the app config).
type JobSchedulerConfig struct {
	// MaxRetentionDays is the retention period in days used by the
	// Elasticsearch clean-up job; loaded from "jobScheduler.retentionInDays".
	MaxRetentionDays int
}
|
||||
|
||||
func NewJobSchedulerConfig() *JobSchedulerConfig {
|
||||
return &JobSchedulerConfig{
|
||||
MaxRetentionDays: getInt("jobScheduler.retentionInDays", true),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *JobSchedulerConfig) GetMaxRetentionDays() int {
|
||||
return p.MaxRetentionDays
|
||||
}
|
||||
5
go.mod
5
go.mod
@@ -8,10 +8,11 @@ require (
|
||||
github.com/confluentinc/confluent-kafka-go/v2 v2.5.0
|
||||
github.com/elastic/go-elasticsearch/v8 v8.14.0
|
||||
github.com/gin-contrib/cors v1.7.2
|
||||
github.com/gin-contrib/zap v0.2.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-co-op/gocron/v2 v2.12.4
|
||||
github.com/golang-migrate/migrate/v4 v4.17.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/viper v1.17.0
|
||||
@@ -50,7 +51,6 @@ require (
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-co-op/gocron/v2 v2.12.4 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
@@ -72,7 +72,6 @@ require (
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
|
||||
@@ -49,6 +49,7 @@ func (el *ElasticSearchClient) DeleteDocuments(searchRequest string) {
|
||||
_, err := el.client.DeleteByQuery(el.Config.Index).Raw(strings.NewReader(searchRequest)).Do(context.TODO())
|
||||
if err != nil {
|
||||
log.Printf("unable to delete documents: %s", err.Error())
|
||||
return
|
||||
}
|
||||
log.Printf("successfully deleted documents: %s", searchRequest)
|
||||
}
|
||||
|
||||
@@ -78,11 +78,11 @@ func InitDependencies() *Dependencies {
|
||||
searchServiceClient := service.NewSearchService(logger, elasticSearch)
|
||||
authService := service.NewAuthService(mjolnirClient)
|
||||
houstonService := service.NewHoustonService(logger, dbClient, kafkaProducer, houstonClient)
|
||||
jobScheduler := jobs.NewJobScheduler(logger, elasticSearch)
|
||||
jobScheduler.ScheduleEsCleanUpJob()
|
||||
jobScheduler.Start()
|
||||
services := initServices(documentServiceClient, projectServiceClient, sourceMapServiceClient, releaseServiceClient, exceptionServiceClient, searchServiceClient, authService)
|
||||
handlers := initHandlers(projectServiceClient, sourceMapServiceClient, releaseServiceClient, exceptionServiceClient, searchServiceClient, houstonService)
|
||||
jobScheduler := jobs.NewJobScheduler(logger, elasticSearch, configs.GetJobSchedulerConfig())
|
||||
jobScheduler.ScheduleEsCleanUpJob()
|
||||
jobScheduler.Start()
|
||||
|
||||
return &Dependencies{
|
||||
Service: services,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package jobs
|
||||
|
||||
import (
|
||||
"cybertron/configs"
|
||||
"cybertron/internal/client/elastic"
|
||||
"cybertron/pkg/log"
|
||||
"cybertron/pkg/utils"
|
||||
@@ -13,9 +14,10 @@ type JobScheduler struct {
|
||||
logger *log.Logger
|
||||
scheduler gocron.Scheduler
|
||||
elasticSearchClient *elastic.ElasticSearchClient
|
||||
config configs.JobSchedulerConfig
|
||||
}
|
||||
|
||||
func NewJobScheduler(logger *log.Logger, elasticSearchClient *elastic.ElasticSearchClient) *JobScheduler {
|
||||
func NewJobScheduler(logger *log.Logger, elasticSearchClient *elastic.ElasticSearchClient, jonSchedulerConfig *configs.JobSchedulerConfig) *JobScheduler {
|
||||
s, err := gocron.NewScheduler()
|
||||
if err != nil {
|
||||
logger.Error("Failed to start scheduler", zap.Error(err))
|
||||
@@ -25,17 +27,23 @@ func NewJobScheduler(logger *log.Logger, elasticSearchClient *elastic.ElasticSea
|
||||
logger: logger,
|
||||
scheduler: s,
|
||||
elasticSearchClient: elasticSearchClient,
|
||||
config: *jonSchedulerConfig,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *JobScheduler) ScheduleEsCleanUpJob() {
|
||||
s.scheduler.NewJob(gocron.DurationJob(2*time.Hour), gocron.NewTask(esCleanUpJob, s))
|
||||
s.scheduler.NewJob(gocron.DurationJob(2*time.Second), gocron.NewTask(esCleanUpJob, s))
|
||||
}
|
||||
func esCleanUpJob(s *JobScheduler) {
|
||||
println("running es clean up job ")
|
||||
|
||||
now := time.Now().Unix()
|
||||
// Subtract 7 days (7 days * 24 hours * 60 minutes * 60 seconds)
|
||||
sevenDaysAgo := now - (7 * 24 * 60 * 60)
|
||||
retentionDays := s.config.GetMaxRetentionDays()
|
||||
|
||||
if retentionDays <= 0 {
|
||||
retentionDays = 7
|
||||
}
|
||||
sevenDaysAgo := now - (int64(retentionDays) * 24 * 60 * 60)
|
||||
rangeQuery := utils.CreateRangeQueryForLteString("created_at", sevenDaysAgo)
|
||||
search_query := utils.CreateSearchQuery(rangeQuery)
|
||||
es_query := utils.CreateEsQuery(search_query)
|
||||
|
||||
Reference in New Issue
Block a user