def run(self):
    """Store credentials/bucket from the CLI args, then validate the profile.

    Returns (ok, message); any unexpected failure is formatted via format_err.
    """
    try:
        # fall back to the default profile when none was passed
        profile = self.args.profile or DEFAULT_PROFILE
        if self.args.bucket:
            set_bucket(self.args.bucket)
        if self.args.ACCESS_KEY and self.args.SECRET_KEY:
            set_profile(profile, DEFAULT_REGION,
                        self.args.ACCESS_KEY, self.args.SECRET_KEY)
        # verify the (possibly just-written) profile actually exists
        if not profile_exists(profile):
            return False, f"Profile '{profile}' not found. Please run config command with your access keys"
        aws = Aws(get_profile(profile))
        if not aws.is_valid_credentials():
            return False, 'Invalid credentials'
        # non-user credentials are reported as admin-capable
        suffix = '' if aws.is_user else ' for admin use'
        return True, 'Valid credentials' + suffix
    except Exception as e:
        return False, format_err(e, 'config')
def run(self):
    """List every bucket area (-b, admin only) or the selected area's contents.

    Prints one entry per line plus a trailing count; returns (ok, message).
    """
    if self.args.b:
        # -b: enumerate all upload areas in the bucket (admin only)
        if self.aws.is_user:
            return False, 'You don\'t have permission to use this command'
        try:
            total = 0
            for area in self.list_bucket_areas():
                print(area["key"], end=' ')
                # perms column is left-padded to 3 chars even when absent
                perms = (area.get('perms') or '') if 'perms' in area else ''
                print(perms.ljust(3), end=' ')
                if 'name' in area:
                    name = area.get('name')
                    print(name if name else '', end=' ')
                print()
                total += 1
            print_count(total)
            return True, None
        except Exception as e:
            return False, format_err(e, 'list')

    # default: list contents of the currently selected area
    selected_area = get_selected_area()
    if not selected_area:
        return False, 'No area selected'
    try:
        # area keys are directory-like; ensure the trailing slash
        if not selected_area.endswith('/'):
            selected_area += '/'
        total = 0
        for key in self.list_area_contents(selected_area):
            print(key)
            # only count real files, not directory placeholder keys
            if not key.endswith('/'):
                total += 1
        print_count(total)
        return True, None
    except Exception as e:
        return False, format_err(e, 'list')
def run(self):
    """Create a new upload area (admin only) and grant it bucket-policy perms.

    The area is an S3 "directory" object keyed by a fresh UUID, carrying the
    human-readable name and permission string as object metadata. Returns
    (ok, message).
    """
    if not self.aws:
        return False, 'You need configure your profile first'
    if self.aws.is_user:
        return False, 'You don\'t have permission to use this command'

    area_name = self.args.NAME
    perms = self.args.p  # optional str, default 'ux'
    area_id = gen_uuid()  # random uuid prefix identifies the area
    try:
        # create the directory marker object with name/perms metadata
        s3_client = self.aws.common_session.client('s3')
        s3_client.put_object(Bucket=self.aws.bucket_name,
                             Key=(area_id + '/'),
                             Metadata={'name': area_name, 'perms': perms})

        # fetch the existing bucket policy; a missing policy raises ClientError
        # (BucketPolicy() itself is lazy, only .policy performs the request)
        s3_resource = self.aws.common_session.resource('s3')
        bucket_policy = s3_resource.BucketPolicy(self.aws.bucket_name)
        try:
            policy_str = bucket_policy.policy
        except ClientError:
            policy_str = ''

        if policy_str:
            policy_json = json.loads(policy_str)
        else:
            # no bucket policy yet: start from an empty policy document
            policy_json = {"Version": "2012-10-17", "Statement": []}

        # append a statement granting `perms` on the new area and write it back
        policy_json['Statement'].append(
            new_policy_statement(self.aws.bucket_name, area_id, perms))
        bucket_policy.put(Policy=json.dumps(policy_json))
        return True, f'Created upload area with UUID {area_id} and name {area_name}'
    except Exception as e:
        return False, format_err(e, 'create')
def run(self):
    """Select an upload area (when AREA is given) or report the current one.

    The stored key is normalized to carry a trailing slash, and selection is
    only persisted after verifying the area object exists in S3. Returns
    (ok, message).
    """
    try:
        if self.args.AREA:
            # normalize to the S3 "directory" form: key with trailing slash
            key = self.args.AREA if self.args.AREA.endswith('/') else f'{self.args.AREA}/'
            if self.aws.obj_exists(key):
                set_selected_area(key)
                return True, f'Selected upload area is {key}'
            return False, "Upload area does not exist"

        # no AREA argument: just report the current selection
        selected_area = get_selected_area()
        if selected_area:
            # reuse the fetched value instead of a redundant second
            # get_selected_area() call (the original re-read it here)
            return True, 'Currently selected upload area is ' + selected_area
        return False, 'No upload area currently selected'
    except Exception as e:
        return False, format_err(e, 'select')
def run(self):
    """Download files from the selected upload area.

    Two modes: with -a, every object under the area prefix is fetched;
    otherwise only the files named in args.f are fetched (each is HEAD-checked
    first so missing/forbidden files get a status without a download attempt).
    Transfers run via transfer() with per-file FileTransfer state; successful
    files are recorded on self.files. Returns (ok, message).
    """
    selected_area = get_selected_area()
    if not selected_area:
        return False, 'No area selected'
    try:
        s3_resource = self.aws.common_session.resource('s3')
        bucket = s3_resource.Bucket(self.aws.bucket_name)
        # choice 1
        all_files = self.args.a  # optional bool
        fs = []  # FileTransfer work items shared with the download workers
        if all_files:
            # download all files from selected area
            for obj in bucket.objects.filter(Prefix=selected_area):
                # skip the top-level directory
                if obj.key == selected_area:
                    continue
                fs.append(
                    FileTransfer(path=os.getcwd(), key=obj.key, size=obj.size))
        else:
            # choice 2
            # download specified file(s) only
            for f in self.args.f:
                # check if f exists
                key = f'{selected_area}{f}'
                try:
                    # if you're able to download (s3:GetObject) you can do HEAD Object which
                    # is used by resource.ObjectSummary
                    obj_summary = s3_resource.ObjectSummary(
                        self.aws.bucket_name, key)
                    obj_size = obj_summary.size
                except botocore.exceptions.ClientError as e:
                    # pre-mark failed entries complete so the transfer loop
                    # skips them but they still show up in the final report
                    if e.response['Error']['Code'] == "404":
                        fs.append(
                            FileTransfer(path=os.getcwd(),
                                         key=key,
                                         status='File not found.',
                                         complete=True))
                    elif e.response['Error']['Code'] == "403":
                        # An error occurred (403) when calling the HeadObject operation: Forbidden
                        fs.append(
                            FileTransfer(path=os.getcwd(),
                                         key=key,
                                         status='Access denied.',
                                         complete=True))
                    else:
                        # Something else has gone wrong.
                        fs.append(
                            FileTransfer(path=os.getcwd(),
                                         key=key,
                                         status='Download error.',
                                         complete=True))
                else:
                    fs.append(
                        FileTransfer(path=os.getcwd(), key=key, size=obj_size))

        def download(idx):
            # Worker body run by transfer(); mutates only fs[idx].
            # NOTE(review): downloads to a local path equal to the S3 key
            # (area prefix becomes a local directory) — confirm intended.
            try:
                file = fs[idx].key
                os.makedirs(os.path.dirname(file), exist_ok=True)
                # fresh session per thread; boto3 sessions are not
                # guaranteed thread-safe
                s3 = self.aws.new_session().resource('s3')
                s3.Bucket(self.aws.bucket_name).download_file(
                    file, file, Callback=TransferProgress(fs[idx]))
                # if file size is 0, callback will likely never be called
                # and complete will not change to True
                # hack
                if fs[idx].size == 0:
                    fs[idx].status = 'Empty file.'
                fs[idx].complete = True
                fs[idx].successful = True
            except Exception as thread_ex:
                # classify by message text since the underlying ClientError
                # is wrapped by the transfer manager
                if 'Forbidden' in str(thread_ex) or 'AccessDenied' in str(
                        thread_ex):
                    fs[idx].status = 'Access denied.'
                else:
                    fs[idx].status = 'Download failed.'
                fs[idx].complete = True
                fs[idx].successful = False

        print('Downloading...')
        transfer(download, fs)
        # expose the successfully transferred files to callers
        self.files = [f for f in fs if f.successful]
        if all([f.successful for f in fs]):
            return True, 'Successful download.'
        else:
            return False, 'Failed download.'
    except Exception as e:
        return False, format_err(e, 'download')
def run(self):
    """Delete the selected area (-d), all of its contents (-a), or the given PATHs.

    Modes are checked in order -d, -a, PATH; exactly one is executed.
    BUGFIX: declining a confirmation prompt now returns immediately — the
    original fell through to the later branches and usually ended with the
    misleading 'No path specified' error. Returns (ok, message-or-None).
    """
    selected_area = get_selected_area()
    if not selected_area:
        return False, 'No area selected'
    try:
        if self.args.d:
            # delete whole area (admin only)
            if self.aws.is_user:
                return False, 'You don\'t have permission to use this command'
            confirm = input(f'Confirm delete upload area {selected_area}? Y/y to proceed: ')
            if confirm.lower() != 'y':
                # user declined: clean no-op instead of falling through
                return True, None
            print('Deleting...')
            deleted_keys = self.delete_upload_area(selected_area, incl_selected_area=True)
            for k in deleted_keys:
                print(k)
            # delete bucket policy for user-folder permissions
            # only admin who has perms to set policy can do this
            self.clear_area_perms_from_bucket_policy(selected_area)
            # clear selected area
            CmdArea.clear(False)
            return True, None
        if self.args.a:
            # delete every object under the area, keeping the area itself
            confirm = input(f'Confirm delete all contents from {selected_area}? Y/y to proceed: ')
            if confirm.lower() != 'y':
                return True, None
            print('Deleting...')
            deleted_keys = self.delete_upload_area(selected_area, incl_selected_area=False)
            for k in deleted_keys:
                print(k)
            return True, None
        if self.args.PATH:
            # list of files and dirs to delete
            print('Deleting...')
            for p in self.args.PATH:
                # you may have perm x but not d (to load or even do a head object)
                # so use obj_exists
                prefix = selected_area + p
                keys = self.all_keys(prefix)
                if keys:
                    for k in keys:
                        self.delete_s3_object(k)
                        print(k + ' Done.')
                else:
                    print(prefix + ' File not found.')
            return True, None
        return False, 'No path specified'
    except Exception as e:
        return False, format_err(e, 'delete')
def run(self):
    """Upload the files/directories in args.PATH to the selected upload area.

    Paths are absolutized and de-duplicated first. Directories are walked up
    to MAX_DIR_DEPTH levels when -r is given (1 level otherwise), skipping
    hidden/'__'-prefixed entries; explicitly named files are never skipped.
    Transfers run via transfer() with per-file FileTransfer state; successful
    files are recorded on self.files. Returns (ok, message).
    """
    selected_area = get_selected_area()
    if not selected_area:
        return False, 'No area selected'
    try:
        # filter out any duplicate path after expansion
        # . -> current directory
        # ~ -> user home directory
        ps = []
        for p in self.args.PATH:
            p = os.path.abspath(
                p
            )  # Normalize a pathname by collapsing redundant separators and up-level references so that A//B, A/B/, A/./B and A/foo/../B all become A/B.
            if not p in ps:
                ps.append(p)

        # create list of files to upload
        fs = []
        max_depth = 1  # default
        if DIR_SUPPORT and self.args.r:
            max_depth = MAX_DIR_DEPTH
        # hidden files/dirs and double-underscore names are skipped in walks
        exclude = lambda f: f.startswith('.') or f.startswith('__')

        def get_files(upload_path, curr_path, level):
            # Recursively collect files under curr_path, keyed relative to
            # upload_path, up to max_depth directory levels.
            if level < max_depth:  # skip files deeper than max depth
                level += 1
                for f in os.listdir(curr_path):
                    full_path = os.path.join(curr_path, f)
                    # skip hidden files and dirs
                    if not exclude(f):
                        if os.path.isfile(full_path):
                            f_size = os.path.getsize(full_path)
                            # strip the upload root to get the S3 key suffix
                            # NOTE(review): str.replace could also match the
                            # prefix elsewhere in the path — confirm harmless
                            rel_path = full_path.replace(
                                upload_path +
                                ('' if upload_path.endswith('/') else '/'),
                                '')
                            fs.append(
                                FileTransfer(path=full_path,
                                             key=rel_path,
                                             size=f_size))
                        elif os.path.isdir(full_path):
                            get_files(upload_path, full_path, level)

        for p in ps:
            if os.path.isfile(
                    p
            ):  # explicitly specified files, whether hidden or starts with '__' not skipped
                f_size = os.path.getsize(p)
                f_name = os.path.basename(p)
                fs.append(FileTransfer(path=p, key=f_name, size=f_size))
            elif os.path.isdir(p):
                # recursively handle dir upload
                get_files(p, p, 0)

        def upload(idx):
            # Worker body run by transfer(); mutates only fs[idx].
            try:
                key = selected_area + fs[idx].key
                # creating a new session for each file upload/thread, as it's unclear whether they're
                # thread-safe or not
                sess = self.aws.new_session()
                # without -o, refuse to clobber an existing object
                if not self.args.o and self.aws.obj_exists(key):
                    fs[idx].status = 'File exists. Use -o to overwrite.'
                    fs[idx].successful = True
                    fs[idx].complete = True
                else:
                    res = sess.resource('s3')
                    # upload_file automatically handles multipart uploads via the S3 Transfer Manager
                    # put_object maps to the low-level S3 API request, it does not handle multipart uploads
                    res.Bucket(self.aws.bucket_name).upload_file(
                        Filename=fs[idx].path,
                        Key=key,
                        Callback=TransferProgress(fs[idx]))
                    # if file size is 0, callback will likely never be called
                    # and complete will not change to True
                    # hack
                    if fs[idx].size == 0:
                        fs[idx].status = 'Empty file.'
                    fs[idx].complete = True
                    fs[idx].successful = True
            except Exception as thread_ex:
                fs[idx].status = 'Upload failed.'
                fs[idx].complete = True
                fs[idx].successful = False

        print('Uploading...')
        transfer(upload, fs)
        # expose the successfully transferred (or skipped-existing) files
        self.files = [f for f in fs if f.successful]
        if all([f.successful for f in fs]):
            return True, 'Successful upload.'
        else:
            return False, 'Failed upload.'
    except Exception as e:
        return False, format_err(e, 'upload')