class Fog::AWS::Storage::File

Constants

MAX_SINGLE_PUT_SIZE

@deprecated use {Fog::AWS::Storage::MAX_SINGLE_PUT_SIZE} instead

MIN_MULTIPART_CHUNK_SIZE

@deprecated use {Fog::AWS::Storage::MIN_MULTIPART_CHUNK_SIZE} instead

MULTIPART_COPY_THRESHOLD

@deprecated not used for anything

UploadPartData

Attributes

body[W]
multipart_chunk_size[R]

@note Chunk size to use for multipart uploads.

Use small chunk sizes to minimize memory, e.g. 5242880 = 5 MB.

Public Instance Methods

acl() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 86
# Fetch the object's current ACL grants from S3.
#
# Requires :directory and :key to be set.
#
# @return [Array<Hash>] the 'AccessControlList' grants from GET Object acl
def acl
  requires :directory, :key
  service.get_object_acl(directory.key, key).body['AccessControlList']
end
acl=(new_acl) click to toggle source

Set file's access control list (ACL).

valid acls: private, public-read, public-read-write, authenticated-read, bucket-owner-read, bucket-owner-full-control

@param [String] new_acl one of valid options @return [String] @acl

# File lib/fog/aws/models/storage/file.rb, line 98
# Set the canned ACL to apply on the next save.
#
# Valid values: private, public-read, public-read-write,
# authenticated-read, bucket-owner-read, bucket-owner-full-control.
#
# @param new_acl [String] one of the canned S3 ACLs
# @raise [ArgumentError] when new_acl is not a recognised canned ACL
# @return [String] the stored ACL
def acl=(new_acl)
  valid_acls = %w[private public-read public-read-write authenticated-read bucket-owner-read bucket-owner-full-control]
  raise ArgumentError.new("acl must be one of [#{valid_acls.join(', ')}]") unless valid_acls.include?(new_acl)

  @acl = new_acl
end
body() click to toggle source

Get file's body if exists, else ''.

@return [File, String] the body, or '' when the remote object does not exist

# File lib/fog/aws/models/storage/file.rb, line 110
# Lazily fetch the object's body.
#
# Returns the cached body when present; otherwise fetches the file via
# the collection and caches its body, or '' when the object is missing.
#
# @return [File, String] cached or freshly fetched body; '' if absent
def body
  return attributes[:body] if attributes.key?(:body)

  remote = collection.get(identity)

  attributes[:body] = remote ? remote.body : ''
end
concurrency() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 82
# Number of threads used for multipart copy operations; defaults to 1
# when never set.
#
# @return [Integer]
def concurrency
  @concurrency || 1
end
concurrency=(concurrency) click to toggle source

@note Number of threads used to copy files.

# File lib/fog/aws/models/storage/file.rb, line 76
# Set the number of threads used to copy files.
#
# @param concurrency [#to_i] thread count; coerced with to_i, minimum 1
# @raise [ArgumentError] when the value converts to less than 1
def concurrency=(concurrency)
  value = concurrency.to_i
  raise ArgumentError.new('minimum concurrency is 1') unless value >= 1

  @concurrency = value
end
copy(target_directory_key, target_file_key, options = {}) click to toggle source

Copy object from one bucket to other bucket.

required attributes: directory, key

@param target_directory_key [String] destination bucket @param target_file_key [String] destination object key @param options [Hash] options for copy_object method @return [Fog::AWS::Storage::File] metadata of the copied object, via Fog::AWS::Files#head

# File lib/fog/aws/models/storage/file.rb, line 149
# Copy this object to another bucket/key, using multipart copy for
# large objects.
#
# Requires :directory and :key. When content_length reaches
# multipart_chunk_size (defaulted from service.max_copy_chunk_size),
# the copy is split into ranged upload_part_copy requests; otherwise a
# single copy_object call is made.
#
# @param target_directory_key [String] destination bucket
# @param target_file_key [String] destination object key
# @param options [Hash] options for copy_object method
# @return [Fog::AWS::Storage::File] head of the copied object
def copy(target_directory_key, target_file_key, options = {})
  requires :directory, :key

  # Default the multipart threshold so large objects get chunked even
  # when the caller never set a chunk size.
  self.multipart_chunk_size = service.max_copy_chunk_size if multipart_chunk_size.nil?

  if multipart_chunk_size > 0 && self.content_length.to_i >= multipart_chunk_size
    # Only whitelisted headers may accompany upload_part_copy requests.
    upload_part_options = options.select { |key, _| ALLOWED_UPLOAD_PART_OPTIONS.include?(key.to_sym) }
    upload_part_options = upload_part_options.merge({ 'x-amz-copy-source' => "#{directory.key}/#{key}" })
    multipart_copy(options, upload_part_options, target_directory_key, target_file_key)
  else
    service.copy_object(directory.key, key, target_directory_key, target_file_key, options)
  end

  # Return the destination file's metadata rather than the raw response.
  target_directory = service.directories.new(:key => target_directory_key)
  target_directory.files.head(target_file_key)
end
destroy(options = {}) click to toggle source

Destroy file via http DELETE.

required attributes: directory, key

@param options [Hash] @option options versionId [] @return [Boolean] true if successful

# File lib/fog/aws/models/storage/file.rb, line 174
# Destroy file via http DELETE.
#
# Requires :directory and :key. When deleting the version this model
# currently represents, the cached body is dropped.
#
# @param options [Hash]
# @option options ['versionId'] [String] specific version to delete
# @return [true] always true (delete_object raises on failure)
def destroy(options = {})
  requires :directory, :key
  attributes[:body] = nil if options['versionId'] == version
  service.delete_object(directory.key, key, options)
  true
end
directory() click to toggle source

Get the file instance's directory.

@return [Fog::AWS::Storage::Directory]

# File lib/fog/aws/models/storage/file.rb, line 136
# The directory (bucket) this file belongs to.
#
# @return [Fog::AWS::Storage::Directory]
def directory
  @directory
end
metadata() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 182
# User metadata stored on this file: every attribute whose key carries
# the x-amz- header prefix.
#
# @return [Hash] the x-amz-* subset of attributes
def metadata
  attributes.select { |key, _value| key.to_s =~ /^x-amz-/ }
end
metadata=(new_metadata) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 187
# Merge user metadata (x-amz-* headers) into this file's attributes.
#
# @param new_metadata [Hash]
def metadata=(new_metadata)
  merge_attributes(new_metadata)
end
multipart_chunk_size=(mp_chunk_size) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 70
# Set the chunk size (bytes) used for multipart uploads/copies.
# The service validates the value and raises when it is invalid.
#
# @param mp_chunk_size [Integer] chunk size in bytes (e.g. 5242880 for 5 MB)
def multipart_chunk_size=(mp_chunk_size)
  service.validate_chunk_size(mp_chunk_size, 'multipart_chunk_size')
  @multipart_chunk_size = mp_chunk_size
end
owner=(new_owner) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 192
# Record the object's owner, accepting either S3-response string keys
# ('DisplayName'/'ID') or symbol keys (:display_name/:id).
#
# @param new_owner [Hash, nil] owner description; nil is ignored
def owner=(new_owner)
  return unless new_owner

  attributes[:owner] = {
    :display_name => new_owner['DisplayName'] || new_owner[:display_name],
    :id           => new_owner['ID'] || new_owner[:id]
  }
end
public=(new_public) click to toggle source

Set Access-Control-List permissions.

valid new_publics: any truthy value (sets 'public-read') or falsy value (sets 'private')

@param new_public [Object] treated as a boolean @return [Object] new_public

# File lib/fog/aws/models/storage/file.rb, line 212
# Set the canned ACL from a boolean: truthy selects 'public-read',
# falsy selects 'private'.
#
# @param new_public [Object] treated as a boolean
# @return [Object] new_public, unchanged
def public=(new_public)
  @acl = new_public ? 'public-read' : 'private'
  new_public
end
public?() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 201
# Whether the object's ACL grants READ to the AllUsers group.
#
# @return [Boolean]
def public?
  all_users_uri = 'http://acs.amazonaws.com/groups/global/AllUsers'
  acl.any? do |grant|
    grant['Grantee']['URI'] == all_users_uri && grant['Permission'] == 'READ'
  end
end
public_url() click to toggle source

Get publicly accessible url via http GET. Checks permissions before creating. Defaults to s3 subdomain or compliant bucket name

required attributes: directory, key

@return [String] public url

# File lib/fog/aws/models/storage/file.rb, line 229
# Publicly accessible URL for plain http GET, or nil when the ACL does
# not grant READ to AllUsers. Defaults to s3 subdomain or compliant
# bucket name.
#
# Requires :directory and :key.
#
# @return [String, nil] public url
def public_url
  requires :directory, :key
  if public?
    service.request_url(
      :bucket_name => directory.key,
      :object_name => key
    )
  else
    nil
  end
end
save(options = {}) click to toggle source

Save file with body as contents to directory.key with name key via http PUT

required attributes: body, directory, key

@param [Hash] options @option options [String] acl sets x-amz-acl HTTP header. Valid values include private | public-read | public-read-write | authenticated-read | bucket-owner-read | bucket-owner-full-control @option options [String] cache_control sets Cache-Control header. For example, 'No-cache' @option options [String] content_disposition sets Content-Disposition HTTP header. For example, 'attachment; filename=testing.txt' @option options [String] content_encoding sets Content-Encoding HTTP header. For example, 'x-gzip' @option options [String] content_md5 sets Content-MD5. For example, '79054025255fb1a26e4bc422aef54eb4' @option options [String] content_type Content-Type. For example, 'text/plain' @option options [String] expires sets number of seconds before AWS Object expires. @option options [String] storage_class sets x-amz-storage-class HTTP header. Defaults to 'STANDARD'. Or, 'REDUCED_REDUNDANCY' @option options [String] encryption sets HTTP encryption header. Set to 'AES256' to encrypt files at rest on S3 @option options [String] tags sets x-amz-tagging HTTP header. For example, 'Org-Id=1' or 'Org-Id=1&Service=MyService' @option options [String] website_redirect_location sets x-amz-website-redirect-location HTTP header. For example, 'website_redirect_location=www.rubydoc.info/github/fog/fog-aws' @return [Boolean] true if no errors

# File lib/fog/aws/models/storage/file.rb, line 259
# Save file with body as contents to directory.key with name key via
# http PUT, switching to multipart upload for large, readable bodies.
#
# Requires :body, :directory and :key. Request headers are assembled
# from the model's attributes (acl, cache control, content headers,
# metadata, storage class, tags, redirect location, encryption); a
# multipart upload is used when multipart_chunk_size > 0, the body size
# reaches that threshold, and the body responds to #read.
#
# @param options [Hash] extra request headers; deprecated, use the
#   model's setters (e.g. #acl=) instead
# @return [true]
def save(options = {})
  requires :body, :directory, :key
  if options != {}
    Fog::Logger.deprecation("options param is deprecated, use acl= instead [light_black](#{caller.first})[/]")
  end
  # Caller-supplied headers win over the model's acl.
  options['x-amz-acl'] ||= @acl if @acl
  options['Cache-Control'] = cache_control if cache_control
  options['Content-Disposition'] = content_disposition if content_disposition
  options['Content-Encoding'] = content_encoding if content_encoding
  options['Content-MD5'] = content_md5 if content_md5
  options['Content-Type'] = content_type if content_type
  options['Expires'] = expires if expires
  options.merge!(metadata)
  options['x-amz-storage-class'] = storage_class if storage_class
  options['x-amz-tagging'] = tags if tags
  options['x-amz-website-redirect-location'] = website_redirect_location if website_redirect_location
  options.merge!(encryption_headers)

  # Default the multipart threshold from the service when unset.
  self.multipart_chunk_size = service.max_put_chunk_size if multipart_chunk_size.nil?
  if multipart_chunk_size > 0 && Fog::Storage.get_body_size(body) >= multipart_chunk_size && body.respond_to?(:read)
    data = multipart_save(options)
    merge_attributes(data.body)
  else
    data = service.put_object(directory.key, key, body, options)
    # Drop headers that would clobber the locally computed content attributes.
    merge_attributes(data.headers.reject {|key, value| ['Content-Length', 'Content-Type'].include?(key)})
  end
  # S3 returns the ETag quoted; store it bare.
  self.etag = self.etag.gsub('"','') if self.etag
  self.content_length = Fog::Storage.get_body_size(body)
  self.content_type ||= Fog::Storage.get_content_type(body)
  true
end
url(expires, options = {}) click to toggle source

Get a url for file.

required attributes: key

@param expires [String] number of seconds (since 1970-01-01 00:00) before url expires @param options [Hash] @return [String] url

# File lib/fog/aws/models/storage/file.rb, line 299
# Get a signed, expiring url for the file.
#
# Requires :key.
#
# @param expires [String] number of seconds (since 1970-01-01 00:00) before url expires
# @param options [Hash]
# @return [String] url
def url(expires, options = {})
  requires :key
  collection.get_url(key, expires, options)
end
versions() click to toggle source

Memoized collection of the file's versions, created on first access. @return [Fog::AWS::Storage::Versions]

# File lib/fog/aws/models/storage/file.rb, line 307
# Memoized collection of this file's versions.
#
# @return [Fog::AWS::Storage::Versions]
def versions
  @versions ||= Fog::AWS::Storage::Versions.new(
    :file    => self,
    :service => service
  )
end

Private Instance Methods

create_part_list(upload_part_options) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 405
# Build the list of pending multipart-copy parts covering the whole
# object, each carrying an inclusive x-amz-copy-source-range header.
#
# NOTE(review): end_pos is clamped to content_length - 1 but not to
# start_pos + chunk - 1, so each non-final part spans chunk + 1 bytes;
# preserved as-is for compatibility.
#
# @param upload_part_options [Hash] headers shared by every part
# @return [Array<UploadPartData>] parts numbered from 1, etags nil
def create_part_list(upload_part_options)
  pending = []
  start_pos = 0

  while start_pos < content_length
    end_pos = [start_pos + multipart_chunk_size, content_length - 1].min
    part_options = upload_part_options.dup
    part_options['x-amz-copy-source-range'] = "bytes=#{start_pos}-#{end_pos}"
    pending << UploadPartData.new(pending.size + 1, part_options, nil)
    start_pos = end_pos + 1
  end

  pending
end
directory=(new_directory) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 318
# Attach this file to a directory (bucket) model.
#
# @param new_directory [Fog::AWS::Storage::Directory]
def directory=(new_directory)
  @directory = new_directory
end
encryption_customer_key_headers() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 397
# SSE-C request headers for customer-provided-key encryption:
# algorithm, base64-encoded key, and base64-encoded MD5 of the key.
#
# @return [Hash] the x-amz-server-side-encryption-customer-* headers
def encryption_customer_key_headers
  raw_key = encryption_key.to_s
  {
    'x-amz-server-side-encryption-customer-algorithm' => encryption,
    'x-amz-server-side-encryption-customer-key' => Base64.encode64(raw_key).chomp,
    'x-amz-server-side-encryption-customer-key-md5' => Base64.encode64(OpenSSL::Digest::MD5.digest(raw_key)).chomp
  }
end
encryption_headers() click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 380
# Encryption headers for an upload, chosen by configuration: SSE-C
# headers when both encryption and encryption_key are set, SSE/KMS
# headers when only encryption is set, otherwise none.
#
# @return [Hash]
def encryption_headers
  return encryption_customer_key_headers if encryption && encryption_key
  return {} unless encryption

  headers = {
    'x-amz-server-side-encryption' => encryption,
    'x-amz-server-side-encryption-aws-kms-key-id' => kms_key_id
  }
  # Omit the KMS key header entirely when no key id is configured.
  headers.reject { |_, value| value.nil? }
end
multipart_copy(options, upload_part_options, target_directory_key, target_file_key) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 356
# Perform a multipart copy to the target bucket/key: initiate the
# upload, copy each byte range concurrently via upload_part_copy
# threads, then complete the upload — or abort it on any failure.
#
# @param options [Hash] headers for initiate_multipart_upload
# @param upload_part_options [Hash] headers applied to each part copy
# @param target_directory_key [String] destination bucket
# @param target_file_key [String] destination object key
def multipart_copy(options, upload_part_options, target_directory_key, target_file_key)
  # Initiate the upload
  res = service.initiate_multipart_upload(target_directory_key, target_file_key, options)
  upload_id = res.body["UploadId"]

  # Store ETags of upload parts
  part_tags = []
  pending = PartList.new(create_part_list(upload_part_options))
  thread_count = self.concurrency
  completed = PartList.new
  errors = upload_in_threads(target_directory_key, target_file_key, upload_id, pending, completed, thread_count)

  raise errors.first if errors.any?

  # Parts may finish out of order; complete_multipart_upload needs
  # their ETags sorted by part number.
  part_tags = completed.to_a.sort_by { |part| part.part_number }.map(&:etag)
rescue => e
  # Abort the upload & reraise
  service.abort_multipart_upload(target_directory_key, target_file_key, upload_id) if upload_id
  raise
else
  # Complete the upload
  service.complete_multipart_upload(target_directory_key, target_file_key, upload_id, part_tags)
end
multipart_save(options) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 322
# Upload the body in sequential multipart_chunk_size-sized chunks via
# S3 Multipart Upload: initiate, upload each chunk read from the body,
# then complete the upload — or abort it on any failure.
#
# @param options [Hash] headers for initiate_multipart_upload; SSE-C
#   entries are also forwarded to each part via part_headers
# @return the complete_multipart_upload response
def multipart_save(options)
  # Initiate the upload
  res = service.initiate_multipart_upload(directory.key, key, options)
  upload_id = res.body["UploadId"]

  # Store ETags of upload parts
  part_tags = []

  # Upload each part
  # TODO: optionally upload chunks in parallel using threads
  # (may cause network performance problems with many small chunks)
  # TODO: Support large chunk sizes without reading the chunk into memory
  if body.respond_to?(:rewind)
    # Best-effort rewind so a previously read body starts from the top.
    body.rewind  rescue nil
  end
  while (chunk = body.read(multipart_chunk_size)) do
    part_upload = service.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk, part_headers(chunk, options))
    part_tags << part_upload.headers["ETag"]
  end

  if part_tags.empty? #it is an error to have a multipart upload with no parts
    part_upload = service.upload_part(directory.key, key, upload_id, 1, '', part_headers('', options))
    part_tags << part_upload.headers["ETag"]
  end

rescue
  # Abort the upload & reraise
  service.abort_multipart_upload(directory.key, key, upload_id) if upload_id
  raise
else
  # Complete the upload
  service.complete_multipart_upload(directory.key, key, upload_id, part_tags)
end
part_headers(chunk, options) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 390
# Headers for one upload part: the chunk's Content-MD5 plus any SSE-C
# headers carried over from the save options.
#
# @param chunk [String] the part body
# @param options [Hash] the full save options
# @return [Hash]
def part_headers(chunk, options)
  sse_keys = encryption_customer_key_headers.keys
  headers = { 'Content-MD5' => Base64.encode64(OpenSSL::Digest::MD5.digest(chunk)).strip }
  options.each_with_object(headers) do |(key, value), acc|
    acc[key] = value if sse_keys.include?(key)
  end
end
upload_in_threads(target_directory_key, target_file_key, upload_id, pending, completed, thread_count) click to toggle source
# File lib/fog/aws/models/storage/file.rb, line 424
# Copy parts from the pending list using thread_count worker threads.
# Each worker pops parts, issues upload_part_copy, records the part's
# ETag and pushes it onto completed. On error a worker clears the
# pending list (so sibling workers drain and stop) and returns the
# exception as its value.
#
# @return [Array<Exception>] errors raised by workers; empty on success
def upload_in_threads(target_directory_key, target_file_key, upload_id, pending, completed, thread_count)
  threads = []

  thread_count.times do
    thread = Thread.new do
      begin
        while part = pending.shift
          part_upload = service.upload_part_copy(target_directory_key, target_file_key, upload_id, part.part_number, part.upload_options)
          part.etag = part_upload.body['ETag']
          completed.push(part)
        end
      rescue => error
        # Stop the other workers and surface the error as this
        # thread's value instead of raising.
        pending.clear!
        error
      end
    end

    thread.abort_on_exception = true
    threads << thread
  end

  # Thread#value joins each thread; compact keeps only the errors.
  threads.map(&:value).compact
end