Schedule all jobs with Sidekiq

This commit is contained in:
Maikel Linke
2026-03-31 14:53:26 +11:00
parent 80a12db191
commit b61f6ab444
4 changed files with 50 additions and 23 deletions

View File

@@ -7,6 +7,15 @@ redis_connection_settings = {
Sidekiq.configure_server do |config|
  config.redis = redis_connection_settings

  config.on(:startup) do
    # Sidekiq reads its main config before dotenv has loaded our server
    # config, so the schedule is loaded here at server startup instead.
    # Mirrors how sidekiq/cli.rb loads the main config file (ERB + YAML).
    schedule_path = File.expand_path("../sidekiq_scheduler.yml", __dir__)
    rendered = ERB.new(File.read(schedule_path), trim_mode: "-").result
    Sidekiq.schedule = YAML.safe_load(
      rendered,
      permitted_classes: [Symbol],
      aliases: true
    )
    SidekiqScheduler::Scheduler.instance.reload_schedule!
  end
end
Sidekiq.configure_client do |config|

View File

@@ -10,11 +10,3 @@ env "MAILTO", ENV["SCHEDULE_NOTIFICATIONS"] if ENV["SCHEDULE_NOTIFICATIONS"]
# If we use -e with a file containing specs, rspec interprets it and filters out our examples
job_type :run_file, "cd :path; :environment_variable=:environment bundle exec script/rails runner :task :output"
# Prune old database backups daily, only when an S3 bucket is configured.
# NOTE(review): presumably superseded by the sidekiq-scheduler schedule — confirm before keeping both.
every 1.day, at: '2:45am' do
rake 'db2fog:clean' if ENV['S3_BACKUPS_BUCKET']
end
# Back up the database every four hours, only when an S3 bucket is configured.
every 4.hours do
rake 'db2fog:backup' if ENV['S3_BACKUPS_BUCKET']
end

View File

@@ -7,18 +7,8 @@
- default
- mailers
:scheduler:
:schedule:
HeartbeatJob:
every: ["5m", first_in: "0s"]
SubscriptionPlacementJob:
every: "5m"
SubscriptionConfirmJob:
every: "5m"
TriggerOrderCyclesToOpenJob:
every: "5m"
OrderCycleClosingJob:
every: "5m"
RakeJob:
args: ["ofn:data:remove_transient_data"]
cron: "30 4 1 * *"
# This config is loaded by Sidekiq before dotenv loads our server config.
# Therefore we load the schedule later. See:
#
# - config/initializers/sidekiq.rb
# - config/sidekiq_scheduler.yml

View File

@@ -0,0 +1,36 @@
# Configure sidekiq-scheduler to run jobs.
#
# - https://github.com/sidekiq-scheduler/sidekiq-scheduler
#
# > Note that every and interval count from when the Sidekiq process (re)starts.
# > So every: '48h' will never run if the Sidekiq process is restarted daily,
# > for example. You can do every: ['48h', first_in: '0s'] to make the job run
# > immediately after a restart, and then have the worker check when it was
# > last run.
#
# Therefore, we use `cron` for jobs that should run at certain times like backups.
#
# This file is rendered with ERB and parsed with YAML.safe_load by the
# Sidekiq server initializer, so backup jobs can be toggled via ENV.
HeartbeatJob:
  every: ["5m", first_in: "0s"]
SubscriptionPlacementJob:
  every: "5m"
SubscriptionConfirmJob:
  every: "5m"
TriggerOrderCyclesToOpenJob:
  every: "5m"
OrderCycleClosingJob:
  every: "5m"
backup:
  class: "RakeJob"
  args: ["db2fog:backup"]
  cron: "0 */4 * * *" # every 4 hours
  # Only schedule backups when an S3 bucket is configured.
  enabled: <%= ENV.key?("S3_BACKUPS_BUCKET") %>
backup_clean:
  class: "RakeJob"
  args: ["db2fog:clean"]
  cron: "45 2 * * *" # every day at 2:45am
  enabled: <%= ENV.key?("S3_BACKUPS_BUCKET") %>
ofn_clean:
  class: "RakeJob"
  args: ["ofn:data:remove_transient_data"]
  cron: "30 4 1 * *" # every month on the first at 4:30am