def run(cls, experiment_run: ExperimentClass, author: User) -> Experiment:
    """Persist an in-memory experiment run, including its metrics, parameters and measurements."""
    experiment, created = Experiment.objects.get_or_create(
        name=experiment_run.name, author=author)
    experiment.save()

    run = Run(uuid=experiment_run.run_id, experiment=experiment)
    run.save()

    for m in experiment_run.metrics:
        metric = Metric(name=m.name, value=m.value, run=run)
        metric.save()

    for p in experiment_run.parameters:
        parameter = Parameter(name=p.name, value=p.value, run=run)
        parameter.save()

    for m in experiment_run.measurements:
        measurement = Count(run=run)
        measurement.save()
        for entry_key, entry_value in m.value.items():
            entry = CountEntry(key=entry_key, value=entry_value,
                               measurement=measurement)
            entry.save()

    return experiment
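def _example_experiment_run():
    """Illustrative only: builds an object shaped like the ExperimentClass that run()
    consumes. The attribute names (.name, .run_id, .metrics, .parameters,
    .measurements) are inferred from the accesses in run() above; SimpleNamespace is
    just a stand-in here, since the real ExperimentClass definition is not shown.
    """
    from types import SimpleNamespace
    return SimpleNamespace(
        name="bell-state",
        run_id="run-0001",
        metrics=[SimpleNamespace(name="fidelity", value=0.97)],
        parameters=[SimpleNamespace(name="shots", value=1024)],
        measurements=[SimpleNamespace(value={"00": 512, "11": 512})],
    )

# A caller (hypothetical adapter class and user) could then do:
#     experiment = ExperimentAdapter.run(_example_experiment_run(), author=some_user)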
def adapt_run_from_request(cls, request: dict, author: User) -> Run:
    """Build and persist a Run (with its state vectors, metrics, parameters and counts) from a request payload."""
    experiment_name = request.get("name")
    experiment, created = Experiment.objects.get_or_create(
        name=experiment_name, author=author)
    experiment.save()

    run_id = request.get("run_id")
    run = Run(run_id=run_id,
              experiment=experiment,
              description=request.get("description", ""),
              timestamp=request.get("timestamp", int(time.time())))
    run.save()

    for sv in request.get("state_vectors", []):
        state_vector = StateVector(name=sv.get("name"), run=run)
        state_vector.save()
        for real, img in sv.get("vector", []):
            complex_number = ComplexNumber(real=real, img=img,
                                           state_vector=state_vector)
            complex_number.save()

    for m in request.get("metrics", []):
        metric = Metric(name=m.get("name"), value=m.get("value"),
                        timestamp=m.get("timestamp"), run=run)
        metric.save()

    for p in request.get("parameters", []):
        parameter = Parameter(name=p.get("name"), value=p.get("value"),
                              timestamp=p.get("timestamp"), run=run)
        parameter.save()

    for c in request.get("counts", []):
        count = Count(name=c.get("name"), run=run)
        count.save()
        for k, v in c.get("value", {}).items():
            count_entry = CountEntry(key=k, value=v, count=count)
            count_entry.save()

    return run
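def _example_run_request():
    """Illustrative only: the payload shape adapt_run_from_request() expects,
    reconstructed from the .get() calls above. The values are made up; each state
    vector entry is a [real, imaginary] pair and each count maps a measured bitstring
    to how often it was observed.
    """
    return {
        "name": "bell-state",
        "run_id": "run-0001",
        "description": "Two-qubit Bell state preparation",
        "timestamp": 1614556800,
        "state_vectors": [
            {"name": "statevector",
             "vector": [[0.707, 0.0], [0.0, 0.0], [0.0, 0.0], [0.707, 0.0]]},
        ],
        "metrics": [{"name": "fidelity", "value": 0.97, "timestamp": 1614556800}],
        "parameters": [{"name": "shots", "value": 1024, "timestamp": 1614556800}],
        "counts": [{"name": "counts", "value": {"00": 512, "11": 512}}],
    }

# A caller (hypothetical adapter class and user) could then do:
#     run = RunAdapter.adapt_run_from_request(_example_run_request(), author=some_user)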
def _create_stubbed_experiments(n_experiments: int,
                                n_runs: int = 2,
                                n_metrics: int = 2,
                                n_parameters: int = 2,
                                n_measurements: int = 2,
                                author: Optional[User] = None) -> List[Experiment]:
    """Creates experiments for tests."""
    if not author:
        author, _ = User.objects.get_or_create(username="******", password="******")
    now = int(time.time())
    experiments = []
    for i in range(n_experiments):
        experiment = Experiment(name="Experiment #{}".format(i), author=author)
        experiment.save()
        for j in range(n_runs):
            run = Run(run_id="run #{}".format(j), experiment=experiment, timestamp=now)
            run.save()
            for m in range(n_metrics):
                metric = Metric(name="Metric {}".format(m), value=0.1, timestamp=now, run=run)
                metric.save()
            for p in range(n_parameters):
                parameter = Parameter(name="Parameter {}".format(p), value=0.1, timestamp=now, run=run)
                parameter.save()
            for m in range(n_measurements):
                count = Count(run=run, name="counts")
                count.save()
                measurement_entry = CountEntry(key="00", value=1024, count=count)
                measurement_entry.save()
            run.save()
        experiment.save()
        experiments.append(experiment)
    return experiments
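# Illustrative only: a minimal test sketch using the stub factory above. The test class
# name is hypothetical, and TestCase is imported here only to keep the sketch
# self-contained; the expected totals follow from the default keyword arguments
# (2 runs per experiment, 2 metrics per run).
from django.test import TestCase


class StubbedExperimentsSmokeTest(TestCase):
    def test_create_stubbed_experiments(self):
        experiments = _create_stubbed_experiments(n_experiments=3)
        self.assertEqual(len(experiments), 3)
        self.assertEqual(Experiment.objects.count(), 3)
        self.assertEqual(Run.objects.count(), 6)       # 3 experiments x 2 runs
        self.assertEqual(Metric.objects.count(), 12)   # 6 runs x 2 metrics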
def upload_log_zip(self, request):
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    zip_root = ''
    try:
        # Access the S3 bucket via the boto3 library. Credentials are stored in the env file.
        s3 = boto3.resource(
            's3',
            aws_access_key_id=config('AWS_ACCESS_KEY'),
            aws_secret_access_key=config('AWS_SECRET_ACCESS_KEY'))

        # Write the request bytes to 'upload.zip'.
        with open('upload.zip', 'wb+') as destination:
            for chunk in request.FILES['file'].chunks():
                destination.write(chunk)

        # Open and begin processing the uploaded files.
        with ZipFile('upload.zip', 'r') as upload:
            # Extract the zip file to access the files.
            upload.extractall()
            # The log files will be under a common 'root' directory.
            zip_root = upload.namelist()[0]

        # Walk through the uppermost directory.
        for root, directories, files in os.walk(os.path.join(base_dir, '../' + zip_root)):
            for directory in directories:
                # At this point, dir_root contains the path of the zip root plus the directory.
                for dir_root, dirs, dir_files in os.walk(
                        os.path.join(base_dir, '../' + zip_root + directory)):
                    # Iterate through each file in the zip.
                    for dir_file in dir_files:
                        # We are only interested in processing and storing the moos, alog and script
                        # files. Raw versions of these file types are stored in the S3 bucket.
                        if '._moos' in dir_file:
                            # Store the raw file in S3: open the file as binary data
                            # and place it in the bucket.
                            with open(os.path.join(base_dir, dir_root + '/' + dir_file),
                                      'rb') as file_data:
                                s3.Bucket('swarm-logs-bucket').put_object(
                                    Key='{}{}{}'.format(zip_root, directory + '/', dir_file),
                                    Body=file_data)

                        # If the file is a .alog, it needs to be parsed into JSON and stored in the DB.
                        if '.alog' in dir_file:
                            # Place the un-parsed file in the S3 bucket.
                            with open(os.path.join(base_dir, dir_root + '/' + dir_file),
                                      'rb') as file_data:
                                s3.Bucket('swarm-logs-bucket').put_object(
                                    Key='{}{}{}'.format(zip_root, directory + '/', dir_file),
                                    Body=file_data)

                            # Parse into JSON. The web parser returns JSON objects containing the
                            # metadata for the log and run objects: essentially only what needs to go
                            # into the database, plus enough to locate the files on S3.
                            json_obj, runs_obj = parsers.web_parser(
                                os.path.join(base_dir, dir_root + '/' + dir_file))
                            index_json_obj = json.loads(json_obj)
                            index_runs = json.loads(runs_obj)

                            # Pull out the pieces needed to store the objects in the DB.
                            device_id = index_json_obj['device_id']
                            file_path = zip_root + directory + '/' + dir_file + '.json'
                            date = index_json_obj['date']
                            time = index_json_obj['time']
                            # TODO specify timezone
                            date_time = datetime.strptime(date + ' ' + time, '%d-%m-%Y %H:%M:%S')

                            # Create the log object first, so it can be referenced by the run objects.
                            log_obj = Log(dateTime=date_time, deviceID=device_id, filePath=file_path)
                            log_obj.save()

                            # Iterate through the returned runs and store each in the DB.
                            for i in index_runs:
                                run_id = i['run_id']
                                # This is the file path that will be used on the bucket.
                                run_fp = zip_root + directory + '/' + dir_file + f'-run{run_id}.json'
                                # Save the run data to the DB.
                                run_obj = Run(dateTime=date_time, deviceID=device_id, runID=run_id,
                                              logID=log_obj, filePath=run_fp)
                                run_obj.save()

                                run_file_path = os.path.join(
                                    base_dir, dir_root + '/' + dir_file + f'-run{run_id}.json')
                                # Upload the run JSON to the bucket.
                                with open(run_file_path, 'rb') as run_file:
                                    s3.Bucket('swarm-logs-bucket').put_object(
                                        Key='{}{}{}'.format(zip_root, directory + '/',
                                                            run_file.name.split('/')[-1]),
                                        Body=run_file)

                                # Upload the script files to the bucket.
                                if 'Narwhal' in run_file_path:
                                    run_script_path = run_file_path.replace('.json', '') + '.script'
                                    with open(run_script_path, 'rb') as script_file:
                                        s3.Bucket('swarm-logs-bucket').put_object(
                                            Key='{}{}{}'.format(zip_root, directory + '/',
                                                                script_file.name.split('/')[-1]),
                                            Body=script_file)
                                        # Rewind before re-uploading the same handle to the second bucket.
                                        script_file.seek(0)
                                        s3.Bucket('swarm-robotics-visualization').put_object(
                                            Key='scripts/{}{}{}'.format(zip_root, directory + '/',
                                                                        script_file.name.split('/')[-1]),
                                            Body=script_file)

                            # Open and place the parsed JSON file in the bucket.
                            with open(os.path.join(base_dir, dir_root + '/' + dir_file + '.json'),
                                      'rb') as json_file:
                                s3.Bucket('swarm-logs-bucket').put_object(
                                    Key='{}{}{}'.format(zip_root, directory + '/',
                                                        json_file.name.split('/')[-1]),
                                    Body=json_file)
    except Exception as e:
        return Response({"Status": "Upload Failed. {}".format(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    else:
        # Return the 200 response.
        return Response({"Status": "Uploaded Successfully."}, status=status.HTTP_200_OK)
    finally:
        # Clean up the files and directories that get created.
        try:
            os.remove(os.path.join(base_dir, '../upload.zip'))
        except OSError as error:
            print('Error removing upload.zip\n' + str(error))
        if zip_root != '':
            shutil.rmtree(os.path.join(base_dir, '../' + zip_root))
        # Check the directory above to make sure the __MACOSX directory gets deleted if it was created.
        for root, directories, files in os.walk(os.path.join(base_dir, '../')):
            if '__MACOSX' in directories:
                shutil.rmtree(os.path.join(base_dir, '../__MACOSX'))
            break
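def _example_upload_client(api_url='http://localhost:8000/api/logs/upload/'):
    """Illustrative only: one way a client could exercise upload_log_zip(), assuming it
    is exposed as a multipart file-upload endpoint (the default URL above is
    hypothetical). The 'file' field name and the expected archive layout -- a zip whose
    top-level directory contains sub-directories of .moos/.alog/.script logs -- come
    from the code above. The view itself additionally needs AWS_ACCESS_KEY and
    AWS_SECRET_ACCESS_KEY in the environment, plus write access to the
    'swarm-logs-bucket' and 'swarm-robotics-visualization' buckets.
    """
    import requests  # assumed available; only needed for this sketch
    with open('mission_logs.zip', 'rb') as archive:
        return requests.post(api_url, files={'file': archive})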