class Sidekiq::WorkSet

The WorkSet stores the work being done by this Sidekiq cluster. It tracks the process and thread working on each job.

WARNING WARNING WARNING

This is live data that can change every millisecond. If you call #size and get 5, then expect #each to yield 5 entries, you're going to have a bad time.

works = Sidekiq::WorkSet.new
works.size => 2
works.each do |process_id, thread_id, work|
  # process_id is a unique identifier per Sidekiq process
  # thread_id is a unique identifier per thread
  # work is a Hash which looks like:
  # { 'queue' => name, 'run_at' => timestamp, 'payload' => job_hash }
  # run_at is an epoch Integer.
end
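
For example, a quick sketch that prints each running job using the standard Sidekiq job hash keys ('class', 'args', 'jid') found in the payload:

works.each do |process_id, thread_id, work|
  job = work['payload']
  puts "#{job['class']} jid=#{job['jid']} args=#{job['args'].inspect}"
end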

Public Instance Methods

each(&block)
# File lib/sidekiq/api.rb, line 954
def each(&block)
  results = []
  Sidekiq.redis do |conn|
    # The "processes" set holds one key per known Sidekiq process.
    procs = conn.sscan_each("processes").to_a
    procs.sort.each do |key|
      # Check that the process is still alive and fetch its running
      # jobs in a single round trip to Redis.
      valid, workers = conn.pipelined {
        conn.exists?(key)
        conn.hgetall("#{key}:workers")
      }
      # Skip processes that have died since the scan above.
      next unless valid
      workers.each_pair do |tid, json|
        hsh = Sidekiq.load_json(json)
        p = hsh["payload"]
        # avoid breaking API, this is a side effect of the JSON optimization in #4316
        hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
        results << [key, tid, hsh]
      end
    end
  end

  # Yield entries oldest-first, by ascending run_at.
  results.sort_by { |(_, _, hsh)| hsh["run_at"] }.each(&block)
end
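
Since entries are yielded oldest-first by run_at, one common use is spotting long-running jobs. A minimal sketch (the 60-second threshold is an arbitrary example):

require 'sidekiq/api'

now = Time.now.to_i
Sidekiq::WorkSet.new.each do |process_id, thread_id, work|
  elapsed = now - work['run_at'] # run_at is an epoch Integer
  next unless elapsed > 60       # hypothetical threshold

  job = work['payload']
  puts "#{process_id}/#{thread_id}: #{job['class']} busy for #{elapsed}s"
end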

size()

Note that #size is only as accurate as Sidekiq's heartbeat, which happens every 5 seconds. It is NOT real-time.

Not very efficient if you have lots of Sidekiq processes, but the alternative is a global counter, which can easily get out of sync with crashy processes.

# File lib/sidekiq/api.rb, line 983
def size
  Sidekiq.redis do |conn|
    procs = conn.sscan_each("processes").to_a
    if procs.empty?
      0
    else
      # Fetch each process's "busy" counter in one pipelined round
      # trip and sum the results.
      conn.pipelined {
        procs.each do |key|
          conn.hget(key, "busy")
        end
      }.sum(&:to_i)
    end
  end
end
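
As a rough utilization check from a console, keeping the heartbeat lag in mind (Sidekiq::ProcessSet is the companion API exposing per-process concurrency):

require 'sidekiq/api'

busy = Sidekiq::WorkSet.new.size
capacity = Sidekiq::ProcessSet.new.sum { |process| process['concurrency'] }
puts "#{busy}/#{capacity} threads busy (as of the last heartbeat)"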