class Sidekiq::WorkSet
The WorkSet stores the work being done by this Sidekiq cluster. It tracks the process and thread working on each job.
WARNING WARNING WARNING
This is live data that can change every millisecond. If you call size => 5 and then expect each to be called 5 times, you're going to have a bad time.
  works = Sidekiq::WorkSet.new
  works.size => 2
  works.each do |process_id, thread_id, work|
    # process_id is a unique identifier per Sidekiq process
    # thread_id is a unique identifier per thread
    # work is a Hash which looks like:
    # { 'queue' => name, 'run_at' => timestamp, 'payload' => job_hash }
    # run_at is an epoch Integer.
  end
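Because the set is live, it is safer to take a single snapshot with each than to call size first and assume the count still holds. A minimal sketch using only the documented API (the snapshot variable and output are illustrative):

  require "sidekiq/api"

  works = Sidekiq::WorkSet.new

  # Capture the entries in one pass; the set can change between
  # a size call and a subsequent each.
  snapshot = []
  works.each do |process_id, thread_id, work|
    snapshot << [process_id, thread_id, work]
  end

  puts "Captured #{snapshot.size} in-progress jobs at #{Time.now}"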
Public Instance Methods
each(&block)
  # File lib/sidekiq/api.rb, line 1104
  def each(&block)
    results = []
    procs = nil
    all_works = nil

    Sidekiq.redis do |conn|
      procs = conn.sscan_each("processes").to_a.sort
      all_works = conn.pipelined do |pipeline|
        procs.each do |key|
          pipeline.hgetall("#{key}:work")
        end
      end
    end

    procs.zip(all_works).each do |key, workers|
      workers.each_pair do |tid, json|
        next if json.empty?

        hsh = Sidekiq.load_json(json)
        p = hsh["payload"]
        # avoid breaking API, this is a side effect of the JSON optimization in #4316
        hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
        results << [key, tid, hsh]
      end
    end

    results.sort_by { |(_, _, hsh)| hsh["run_at"] }.each(&block)
  end
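As a usage example, each can flag jobs that have been running unusually long. A sketch under the documented contract; the 300-second threshold is arbitrary, and the class and jid keys assume the standard Sidekiq job payload:

  require "sidekiq/api"

  LONG_RUNNING_SECS = 300 # arbitrary threshold for this example

  Sidekiq::WorkSet.new.each do |process_id, thread_id, work|
    # run_at is an epoch Integer, so elapsed time is a subtraction.
    elapsed = Time.now.to_i - work["run_at"]
    next unless elapsed > LONG_RUNNING_SECS

    job = work["payload"]
    puts "#{job["class"]} (jid #{job["jid"]}) on #{process_id}/#{thread_id} " \
         "has been running for #{elapsed}s in queue #{work["queue"]}"
  end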
size()
Note that size is only as accurate as Sidekiq's heartbeat, which happens every 5 seconds. It is NOT real-time.

Not very efficient if you have lots of Sidekiq processes, but the alternative is a global counter, which can easily get out of sync with crashy processes.
  # File lib/sidekiq/api.rb, line 1140
  def size
    Sidekiq.redis do |conn|
      procs = conn.sscan_each("processes").to_a
      if procs.empty?
        0
      else
        conn.pipelined { |pipeline|
          procs.each do |key|
            pipeline.hget(key, "busy")
          end
        }.sum(&:to_i)
      end
    end
  end
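A common use is a coarse busy-jobs gauge for monitoring. Since the underlying counters refresh only with the 5-second heartbeat, polling faster buys no extra precision; a sketch (the loop and output format are illustrative):

  require "sidekiq/api"

  works = Sidekiq::WorkSet.new

  loop do
    # size sums each process's "busy" counter from its last heartbeat,
    # so sampling more often than every 5 seconds adds nothing.
    puts "sidekiq.busy=#{works.size}"
    sleep 10
  end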