def put(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    activities_path = current_path + \
        '/storages/{0}/activities.json'.format(ws_name)
    if not self.get_activities(workspace=workspace):
        return {
            "error": "activities doesn't exist for {0} workspace".format(workspace)
        }

    module = request.args.get('module')
    raw_activities = self.activities
    for k, v in self.activities.items():
        if k == module:
            raw_activities[k] = []
            for item in v:
                cmd_item = item
                cmd_item['status'] = "Done"
                raw_activities[k].append(cmd_item)

    # rewrite the activities again
    utils.just_write(activities_path, raw_activities, is_json=True)
    commands = [x for x in raw_activities[module]]
    return {'commands': commands}
def get_activities(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    activities_path = current_path + \
        '/storages/{0}/activities.json'.format(ws_name)
    self.activities = utils.reading_json(activities_path)
    if not self.activities:
        return False
    return True
def get_commands(options, module):
    headers['Authorization'] = options['JWT']
    workspace = utils.get_workspace(options=options)
    url = options['REMOTE_API'] + \
        "/api/{0}/routines?module={1}".format(workspace, module)
    r = requests.get(url, verify=False, headers=headers)
    if r.status_code == 200:
        return json.loads(r.text)
    return None
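# Hedged usage sketch for get_commands: the option values and the module name
# 'subdomain' are illustrative assumptions; real values come from the
# workspace's options.json rather than a hand-built dict like this.
if __name__ == '__main__':
    options = {
        'JWT': '<jwt token>',                        # assumed auth token
        'REMOTE_API': 'https://127.0.0.1:8000',      # assumed API endpoint
        'WORKSPACE': '/tmp/workspaces/example.com',  # assumed workspace path
    }
    routines = get_commands(options, 'subdomain')
    if routines:
        print(routines)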
def get(self, workspace):
    # prevent reading secrets from the config file through the API
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = current_path + \
        '/storages/{0}/options.json'.format(ws_name)

    secret_things = ['USERNAME', 'PASSWORD', 'BOT_TOKEN', 'GITHUB_API_KEY']
    options = utils.reading_json(options_path)
    for item in secret_things:
        # pop() instead of del so a missing key doesn't raise KeyError
        options.pop(item, None)

    return options
def post(self):
    # global options
    data = Configurations.parser.parse_args()
    options = data['options']
    # @TODO add another authentication level when setting things from remote

    # check if the credentials match the ones in the config file
    if not self.verify(options):
        return {"error": "Cannot verify credentials to set up the config"}

    # write each workspace to a separate folder
    ws_name = utils.get_workspace(options)
    utils.make_directory(current_path + '/storages/{0}/'.format(ws_name))
    if not os.path.isdir(current_path + '/storages/{0}/'.format(ws_name)):
        return {
            "error": "Cannot create workspace directory with name {0}".format(ws_name)
        }

    activities_path = current_path + \
        '/storages/{0}/activities.json'.format(ws_name)
    options_path = current_path + \
        '/storages/{0}/options.json'.format(ws_name)

    # consider this the settings db
    utils.just_write(options_path, options, is_json=True)

    if options.get('FORCE') == "False":
        old_log = options['WORKSPACE'] + '/log.json'
        if utils.not_empty_file(old_log) and utils.reading_json(old_log):
            utils.print_info(
                "It's already done. Use the '-f' option to force rerun the module")
            raw_activities = utils.reading_json(old_log)
            utils.just_write(activities_path, raw_activities, is_json=True)
            return options

    utils.print_info("Cleaning activities log")
    # create skeleton activities based on commands.json
    commands = utils.reading_json(current_path + '/storages/commands.json')
    raw_activities = {}
    for k, v in commands.items():
        raw_activities[k] = []

    utils.just_write(activities_path, raw_activities, is_json=True)
    return options
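# For reference, a sketch of the assumed file shapes involved: commands.json
# maps module names to command definitions, and the skeleton activities file
# starts each module with an empty list. Module names here are illustrative.
#
#   commands.json   -> {"subdomain": [...], "portscan": [...]}
#   activities.json -> {"subdomain": [], "portscan": []}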
def send_JSON(options, json_body, token=''):
    headers['Authorization'] = options['JWT']
    workspace = utils.get_workspace(options=options)
    url = options['REMOTE_API'] + "/api/{0}/cmd".format(workspace)
    # fire and forget: the short timeout is deliberate, and any request
    # error (including the timeout itself) is ignored
    try:
        requests.post(url, verify=False, headers=headers,
                      json=json_body, timeout=0.1)
    except requests.exceptions.RequestException:
        pass
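# Hedged example of a json_body for send_JSON, mirroring the fields the Cmd
# endpoint below parses; the command and module name are illustrative only.
example_body = {
    'cmd': 'echo example',       # command for the server to execute
    'std_path': '',              # where to write raw stdout ('' to skip)
    'output_path': '',           # where parsed output lands ('' to skip)
    'module': 'example_module',  # assumed module name
    'nolog': 'False',            # string flag, as the endpoint expects
}
# send_JSON(options, example_body)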
def post(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = current_path + \
        '/storages/{0}/options.json'.format(ws_name)
    self.options = utils.reading_json(options_path)

    module = request.args.get('module')
    ws_name = os.path.basename(os.path.normpath(workspace))
    ws_name_encode = utils.url_encode(ws_name)
    utils.print_debug(ws_name)

    # guard against neither branch matching, which would leave raw_logs unset
    raw_logs = None
    if ws_name in os.listdir(self.options['WORKSPACES']):
        ws_json = self.options['WORKSPACES'] + \
            "/{0}/log.json".format(ws_name)
        raw_logs = utils.reading_json(ws_json)
    elif ws_name_encode in os.listdir(self.options['WORKSPACES']):
        ws_json = self.options['WORKSPACES'] + \
            "/{0}/log.json".format(ws_name_encode)
        raw_logs = utils.reading_json(ws_json)

    if raw_logs:
        all_commands = []
        for k in raw_logs.keys():
            for item in raw_logs[k]:
                cmd_item = item
                cmd_item['module'] = k
                cmd_item['std_path'] = utils.replace_argument(
                    self.options, item.get('std_path')).replace(
                        self.options['WORKSPACES'], '')
                cmd_item['output_path'] = utils.replace_argument(
                    self.options, item.get('output_path')).replace(
                        self.options['WORKSPACES'], '')
                all_commands.append(cmd_item)

        return {"commands": all_commands}

    return {
        "error": "Not found logs file for {0} workspace".format(ws_name)
    }
def get(self, workspace):
    profile = request.args.get('profile')
    module = request.args.get('module')
    ws_name = utils.get_workspace(workspace=workspace)

    # set default profile
    if profile is None:
        profile = 'quick'

    routines = self.get_routine(ws_name, profile)
    if not routines:
        return {
            "error": "options doesn't exist for {0} workspace".format(workspace)
        }

    if module is not None:
        routines = routines.get(module)

    return {'routines': routines}
def get_dataframe():
    # default to None so the function doesn't raise NameError when loading fails
    df = None
    if args.run_at == 'local':
        try:
            ws = get_workspace()
            dataset = Dataset.get_by_name(ws, dataset_name)
            df = dataset.to_pandas_dataframe()
            print("Got dataset ", dataset_name)
        except Exception:
            print("Failed to get dataset ", dataset_name)
    elif args.run_at == 'remote':
        try:
            # on a remote run the dataset is wired in as a named input
            run = Run.get_context()
            dataset = run.input_datasets['bearingdata']
            df = dataset.to_pandas_dataframe()
            print("Got dataset ", dataset_name)
        except Exception:
            print("Failed to get dataset ", dataset_name)
    else:
        print('Unexpected value for run_at argument: ', args.run_at)
    return df
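# A minimal sketch of what utils.get_workspace might look like for the local
# branch above, assuming the standard downloaded config.json pattern; the
# real helper may authenticate differently.
from azureml.core import Workspace

def get_workspace():
    # reads subscription, resource group and workspace name from ./config.json
    return Workspace.from_config()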
def get(self, workspace):
    # get the options for this workspace
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = str(
        BASE_DIR.joinpath('storages/{0}/options.json'.format(ws_name)))
    self.options = utils.reading_json(options_path)

    module = request.args.get('module')
    ws_name = os.path.basename(os.path.normpath(workspace))
    if ws_name in os.listdir(self.options['WORKSPACES']):
        ws_json = self.options['WORKSPACES'] + \
            "/{0}/log.json".format(ws_name)
        if os.path.isfile(ws_json):
            raw_logs = utils.reading_json(ws_json)
            # log aliases raw_logs, so the path rewrites happen in place
            log = raw_logs
            for key in raw_logs.keys():
                for i in range(len(raw_logs[key])):
                    log[key][i]['std_path'] = utils.replace_argument(
                        self.options, raw_logs[key][i].get('std_path')).replace(
                            self.options['WORKSPACES'], '')
                    log[key][i]['output_path'] = utils.replace_argument(
                        self.options, raw_logs[key][i].get('output_path')).replace(
                            self.options['WORKSPACES'], '')

            if module:
                cmds = log.get(module)
                return {'commands': cmds}
            return log

    return 'Custom 404 here', 404
from azureml.core import Workspace, Dataset, Experiment, Run
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException
from azureml.train.dnn import TensorFlow
from azureml.widgets import RunDetails
import os

from utils import get_workspace

ws = get_workspace()
cluster_name = "bbacompute"
dataset_name = "bearing_dataset"
dataset = Dataset.get_by_name(ws, dataset_name)

# reuse the compute cluster if it already exists, otherwise provision it
try:
    cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print("cluster exists: ", cluster_name)
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="standard_d12_v2", max_nodes=1)
    cluster = ComputeTarget.create(ws, cluster_name, compute_config)
    cluster.wait_for_completion(show_output=True)

exp_name = "exp_bearing_anomaly_lstm"
experiment = Experiment(ws, name=exp_name)

estimator = TensorFlow(
    source_directory='.',
    entry_script='lstm.py',
    script_params={'--run_at': 'remote'},
    inputs=[dataset.as_named_input('bearingdata')],
    compute_target=cluster,
)
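# A hedged continuation: submitting the estimator and monitoring the run is
# presumably what RunDetails is imported for above; this is the usual azureml
# pattern, not confirmed source.
run = experiment.submit(estimator)
RunDetails(run).show()  # live progress widget when run from a notebook
run.wait_for_completion(show_output=True)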
def post(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = current_path + \
        '/storages/{0}/options.json'.format(ws_name)
    self.options = utils.reading_json(options_path)

    data = Cmd.parser.parse_args()
    cmd = data['cmd']
    std_path = data['std_path']
    output_path = data['output_path']
    module = data['module']
    nolog = data['nolog']

    activity = {
        'cmd': cmd,
        'std_path': std_path,
        'output_path': output_path,
        'status': 'Running'
    }

    if nolog == 'False':
        activities_path = current_path + \
            '/storages/{0}/activities.json'.format(ws_name)
        activities = utils.reading_json(activities_path)
        if activities.get(module):
            activities[module].append(activity)
        else:
            activities[module] = [activity]
        utils.just_write(activities_path, activities, is_json=True)

        slack.slack_noti(
            'log',
            self.options,
            mess={
                'title': "{0} | {1} | Execute".format(
                    self.options['TARGET'], module),
                'content': '```{0}```'.format(cmd),
            })

    utils.print_info("Execute: {0} ".format(cmd))
    stdout = execute.run(cmd)
    utils.check_output(output_path)
    # just ignore for testing purpose
    # stdout = "<< stdoutput >> << {0} >>".format(cmd)

    if nolog == 'False':
        # update the status of this command in the activities log
        activities = utils.reading_json(activities_path)
        for item in activities[module]:
            if item['cmd'] == cmd:
                item['status'] = 'Error' if stdout is None else 'Done'

        try:
            if std_path != '':
                utils.just_write(std_path, stdout)
                slack.slack_file(
                    'std',
                    self.options,
                    mess={
                        'title': "{0} | {1} | std".format(
                            self.options['TARGET'], module),
                        'filename': '{0}'.format(std_path),
                    })
            if output_path != '':
                slack.slack_file(
                    'verbose-report',
                    self.options,
                    mess={
                        'channel': self.options['VERBOSE_REPORT_CHANNEL'],
                        'filename': output_path
                    })
        except Exception:
            # notifications are best-effort; never fail the request over them
            pass

        utils.just_write(activities_path, activities, is_json=True)

    return jsonify(status="200", output_path=output_path)