def test_aws_worker():
    """End-to-end lifecycle test for AWSWorker: setup, launch, collect, teardown.

    Requires a local ``config.yml`` next to this test file (with real AWS
    credentials); the test is skipped when it is absent.
    """
    if not os.path.isfile(os.path.join(HERE, 'config.yml')):
        pytest.skip("Only for local tests for now")

    ramp_kit_dir = os.path.join(HERE, 'kits', 'iris')

    # make sure prediction and log dirs exist, if not, add them
    add_empty_dir(os.path.join(ramp_kit_dir, 'predictions'))
    add_empty_dir(os.path.join(ramp_kit_dir, 'logs'))

    # if prediction / log files from a previous run are still there, remove
    # them.  NOTE: os.listdir returns bare names, so each entry must be
    # re-joined with its parent directory — checking the bare name would
    # resolve against the current working directory instead (previous bug).
    for parent in ('predictions', 'logs'):
        parent_dir = os.path.join(ramp_kit_dir, parent)
        for name in os.listdir(parent_dir):
            path = os.path.join(parent_dir, name)
            if os.path.isdir(path):
                shutil.rmtree(path)

    config = read_config(os.path.join(HERE, 'config.yml'))
    worker_config = generate_worker_config(config)
    worker = AWSWorker(worker_config, submission='starting_kit_local')

    # exercise the full worker state machine
    worker.setup()
    assert worker.status == 'setup'
    worker.launch_submission()
    assert worker.status in ('running', 'finished')
    worker.collect_results()
    assert worker.status == 'collected'

    # collected artifacts must have been rsync'ed back into the kit dir
    assert os.path.isdir(
        os.path.join(ramp_kit_dir, 'predictions', 'starting_kit_local',
                     'fold_0'))
    assert os.path.isfile(
        os.path.join(ramp_kit_dir, 'logs', 'starting_kit_local', 'log'))

    worker.teardown()
    assert worker.status == 'killed'
def test_launch_ec2_instances_put_back_into_queue(test_launch_ec2_instances,
                                                  caplog):
    '''checks if the retry status and the correct log is added

    if the api returns None instances and status retry
    '''
    # simulate the EC2 API returning no instances with a 'retry' status
    test_launch_ec2_instances.return_value = None, 'retry'

    # build an AWS worker from the template event config
    event_config = read_config(ramp_aws_config_template())['worker']
    worker = AWSWorker(event_config, submission='starting_kit_local')
    worker.config = event_config

    # setup should flag the worker for re-queueing and log the reason
    worker.setup()
    assert worker.status == 'retry'
    assert 'Adding it back to the queue and will try again' in caplog.text
def test_aws_worker_upload_error(test_launch_ec2_instances, test_rsync,
                                 caplog):
    # stand-in for a launched AWS instance: only the id attribute is read
    class DummyInstance:
        id = 1

    test_launch_ec2_instances.return_value = (DummyInstance(),), 0
    # make the rsync upload fail with an SSH-style error (exit code 255)
    test_rsync.side_effect = subprocess.CalledProcessError(255, 'test')

    # build an AWS worker from the template event config
    event_config = read_config(ramp_aws_config_template())['worker']
    worker = AWSWorker(event_config, submission='starting_kit_local')
    worker.config = event_config

    # the CalledProcessError raised inside setup() must be swallowed,
    # leaving the worker in the 'error' state with the failure logged
    worker.setup()
    assert worker.status == 'error'
    assert 'Unable to connect during log download' in caplog.text