def main(opt: Options):
    """Download the configured backup files from an FTP server and log the result.

    Flow: read config.json under CONFIG_PATH for the FTP host/credentials and
    the list of remote files, download each into a JST-timestamped directory
    under the service data path, then write one success/failure row per file
    to RobotBackupDB. The output kanban is emitted in a ``finally`` so it is
    sent even when the DB write raises.
    """
    # get cache kanban
    conn = opt.get_conn()
    num = opt.get_number()
    # kanban = conn.get_one_kanban(SERVICE_NAME, num)
    kanban: Kanban = conn.set_kanban(SERVICE_NAME, num)
    data_path = f"/var/lib/aion/Data/{SERVICE_NAME}_{num}/data"
    # open config file
    config = None
    with open(os.path.join(CONFIG_PATH, "config.json")) as f:
        config = json.load(f)
    result_status = True
    # JST is fixed UTC+9 (no DST), so a static timezone object is sufficient.
    JST = timezone(timedelta(hours=+9), 'JST')
    backup_time = datetime.now(JST)
    backup_dir = os.path.join(data_path, backup_time.strftime('%Y%m%d%H%M%S'))
    # 'ftp-backup-file' is a list of remote file paths to fetch.
    backup_file = config.get('ftp-backup-file')
    # Map each remote file -> local destination inside backup_dir.
    backup_save_path = {}
    for bf in backup_file:
        backup_save_path[bf] = os.path.join(backup_dir, bf)
    host = config.get('ftp-server')
    user = config.get('ftp-user')
    passwd = config.get('ftp-passwd')
    # make backup directory for every destination file
    for bsp in backup_save_path.values():
        dirname = os.path.dirname(bsp)
        lprint(f"make backup directory: {dirname}")
        os.makedirs(dirname, exist_ok=True)
    ### don't mkdir backup_dir itself because sub-dirs are defined in backup_file ###
    try:
        ftpc = FtpClient(host, user, passwd)
        for bf, bsp in backup_save_path.items():
            ftpc.get(bf, bsp)
    except ftplib.all_errors as error:
        # ftplib.all_errors is the tuple of every FTP-related exception.
        lprint_exception(f"ftp connection failed : {error}")
        result_status = False
    # write backup history to db
    try:
        with RobotBackupDB() as db:
            backup_state = 1 if result_status else 2  # 1:succeeded 2:failed
            for bsp in backup_save_path.values():
                db.set_backup_to_db(bsp, backup_time.strftime('%Y-%m-%d %H:%M:%S'), backup_state)
    finally:
        # output after kanban — always emitted, even if the DB write failed
        conn.output_kanban(
            result=True,
            connection_key="key",
            output_data_path=data_path,
            process_number=num,
        )
    return
def pool_template_matching(args_queue, return_queue, templates_data, image_path):
    """Worker loop for template matching in a separate process.

    Blocks on ``args_queue`` for ``(function_name, kwargs)`` requests,
    dispatches to the ``Matcher``, and puts ``(exit_status, return_value)``
    on ``return_queue`` for every request. Runs until the process is killed.
    """
    matcher = Matcher(templates_data, image_path)
    while True:
        exit_status = True
        return_value = None
        function_name, kwargs = args_queue.get()
        try:
            if function_name == 'set_templates':
                matcher.set_templates(kwargs['templates_data'])
            elif function_name == 'run':
                return_value = matcher.run(kwargs['image_path'])
            else:
                # BUG FIX: the message was missing its f-prefix, so
                # "{function_name}" was emitted literally instead of the value.
                raise TemplateMatchingException(
                    f'pool_matching() recieve Invalid function name ({function_name}).'
                )
        except Exception as e:
            # Report failure to the caller instead of killing the worker.
            exit_status = False
            lprint_exception(e)
        return_queue.put((exit_status, return_value))
def main_with_kanban_itr(opt: Options):
    """Iterate kanbans and save one timestamped screenshot per kanban.

    For each kanban: read ``screen_name``/``screenshot_id`` from metadata,
    skip kanbans without a screenshot_id, otherwise capture a screenshot to
    SAVE_DIRECTORY and record the result. All exceptions are logged, never
    propagated.
    """
    # BUG FIX: start-up log said "main_with_kanban_itri()" (typo).
    lprint("start main_with_kanban_itr()")
    lprint("DISPLAY: ", os.environ.get('DISPLAY'))
    conn = opt.get_conn()
    num = int(opt.get_number())
    try:
        for kanban in conn.get_kanban_itr(SERVICE_NAME, num):
            metadata = kanban.get_metadata()
            lprint(metadata)
            screen_name = metadata.get('screen_name')
            screenshot_id = metadata.get('screenshot_id')
            # A missing id means there is nothing to record — skip early.
            if screenshot_id is None:
                lprint("no screenshot_id")
                continue
            if screen_name is None:
                screen_name = "NO_NAME"
            lprint(f"screen_name: {screen_name}")
            lprint(f"screenshot_id : {screenshot_id}")
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            screen = ScreenShotService()
            screen.shot_and_save(SAVE_DIRECTORY, f"{screen_name}_{timestamp}")
            screen.write_result(screenshot_id)
    except Exception as e:
        lprint_exception(e)
    # removed a useless `finally: pass`
def main(opt: Options):
    """Kanban-driven screenshot loop.

    Pulls kanbans one at a time; for each new ``screenshot_id`` it captures
    a timestamped screenshot into SAVE_DIRECTORY and records the result.
    Exits the process cleanly when no kanban is found.
    """
    conn = opt.get_conn()
    num = opt.get_number()
    lprint("DISPLAY: ", os.environ.get('DISPLAY'))
    try:
        while True:
            kanban = conn.get_one_kanban(SERVICE_NAME, num)
            meta = kanban.get_metadata()
            screen_name = meta.get('screen_name')
            screenshot_id = meta.get('screenshot_id')
            if screen_name is None:
                screen_name = "NO_NAME"
            lprint(f"screen_name: {screen_name}")
            lprint(f"screenshot_id : {screenshot_id}")
            stamp = datetime.now().strftime("%Y%m%d%H%M%S")
            service = ScreenShotService()
            # Guard clause: only act on ids we have not processed yet.
            if not service.check_new_id(screenshot_id):
                lprint("not new id")
                continue
            service.shot_and_save(SAVE_DIRECTORY, f"{screen_name}_{stamp}")
            service.write_result(screenshot_id)
    except KanbanNotFoundError:
        lprint("kanban not found finish")
        sys.exit(0)
    except Exception as e:
        lprint_exception(e)
def set_backup_to_db(self, backup_save_path, backup_date, backup_state):
    """Record one backup attempt as a row in the ``backupfiles`` table.

    NOTE(review): values are interpolated directly into the SQL text; the
    inputs here are internally generated, but confirm whether set_query()
    supports bound parameters before passing anything user-controlled.
    """
    query = f"""
    insert into backupfiles(path, date, state)
    values ('{backup_save_path}', '{backup_date}', {backup_state})
    """
    if self.set_query(query):
        self.commit_query()
    else:
        lprint_exception('failed to insert new backup data')
def write_result(self, _id):
    """Persist the screenshot save outcome for ``_id`` to the DB (log says mysql).

    Writes (id, save_path, 1-or-0) via UsbSql.write_screenshot_save_result;
    any DB failure is logged and swallowed.
    """
    # NOTE(review): this reads self.save_is_success, but shot_and_save()
    # sets self.is_success — confirm which attribute __init__ establishes;
    # one of the two names looks wrong.
    is_success = 1 if self.save_is_success else 0
    # Empty string instead of None so the DB column always gets a string.
    save_path = self.save_path if self.save_path else ""
    with UsbSql(DATABASE) as db:
        try:
            db.write_screenshot_save_result(_id, save_path, is_success)
            lprint(f"write result to mysql: {_id}, {save_path}")
        except Exception as e:
            lprint_exception(e)
            lprint(f"{e} : fail to write result to mysql")
def delete_current_schedules(self, job_ids):
    """Delete Maintenance.jobschedule rows whose job_id is in ``job_ids``.

    Accepts string or int ids; duplicates are removed. A no-op for an empty
    list (previously that produced invalid SQL: ``IN ()``).
    """
    # ROBUSTNESS FIX: guard the empty case, and stringify ids so integer
    # job_ids no longer break ','.join(); sorting makes the SQL deterministic.
    if not job_ids:
        return
    unique_ids = sorted({str(job_id) for job_id in job_ids})
    job_ids_str = '(' + ','.join(unique_ids) + ')'
    query = f"""
    DELETE FROM Maintenance.jobschedule WHERE job_id IN {job_ids_str};
    """
    ret = self.set_query(query)
    if not ret:
        lprint_exception('failed to delete data')
    else:
        self.commit_query()
def shot(self):
    """Take a full-screen screenshot and return the image object.

    Raises:
        RuntimeError: if the underlying gui.screenshot() call fails
            (chained to the original exception).
    """
    try:
        screenshot = gui.screenshot()
        # NOTE: shot twice to take right screen but reason is unknown
        screenshot = gui.screenshot()
    except Exception as e:
        lprint_exception(e)
        # IDIOM FIX: chain the cause so the traceback keeps the root error.
        raise RuntimeError("fail to screenshot ==> shot") from e
    return screenshot
def deleteEnvoy(self, name):
    """Delete the envoy ConfigMap ``name`` from NAMESPACE, logging the outcome."""
    try:
        self.api_instance.delete_namespaced_config_map(name, NAMESPACE)
    except client.rest.ApiException as e:
        lprint_exception(
            "Exception when calling CoreV1Api->delete_namespaced_config_map: %s\n"
            % e)
    else:
        lprint(
            f"CoreV1Api->delete_namespaced_config_map for {name} is successful."
        )
def deleteCronJob(self, name):
    """Delete the CronJob ``name`` from NAMESPACE, logging the outcome."""
    try:
        self.api_instance.delete_namespaced_cron_job(name, NAMESPACE)
    except client.rest.ApiException as e:
        lprint_exception(
            "Exception when calling BatchV2alpha1Api->delete_namespaced_cron_job: %s\n"
            % e)
    else:
        lprint(
            f"BatchV2alpha1Api->delete_namespaced_cron_job for {name} is successful."
        )
def shot_and_save(self, path, filename):
    """Capture a screenshot and save it as ``<mountpoint>/<filename>.png``.

    Sets ``self.save_path`` to the written file and ``self.is_success``
    to True/False; all failures are logged and swallowed.
    """
    try:
        screenshot = self.shot()
        _path = get_save_path(path)
        if not _path:
            raise RuntimeError("not found usb mountpoint")
        # BUG FIX: the name was a placeholder-less f-string, so the
        # `filename` argument was ignored and every capture overwrote the
        # same file; use the caller-supplied name.
        self.save_path = os.path.join(_path, f"{filename}.png")
        screenshot.save(self.save_path)
        lprint(f"save screentshot to {self.save_path}")
        # NOTE(review): write_result() reads self.save_is_success while this
        # sets self.is_success — confirm which attribute name is intended.
        self.is_success = True
    except Exception as e:
        lprint_exception(e)
        self.is_success = False
def set_equipments(self, job_id, equipment_list):
    """Insert one (job_id, equipment_id) row per equipment into
    Maintenance.equipments_has_jobs.

    A no-op for an empty equipment_list (previously ``query[:-1]`` clipped
    a character off the base query and sent malformed SQL).
    """
    # ROBUSTNESS FIX: skip the round-trip entirely when there is nothing
    # to insert instead of emitting an INSERT with no VALUES tuples.
    if not equipment_list:
        return
    query = f"""
    insert into Maintenance.equipments_has_jobs(job_id, equipment_id)
    values
    """
    # Join the tuples instead of append-then-strip-trailing-comma.
    query += ",".join("(%d, %d)" % (job_id, equipment_id)
                      for equipment_id in equipment_list)
    query += ";"
    ret = self.set_query(query)
    if not ret:
        lprint_exception('failed to insert data')
    else:
        self.commit_query()
def set_schedule(self, project_id, microservice_id, schedule_id,
                 job_id, job_name, start_at, stop_at, repeat_type, cron_date):
    """Insert one row into Maintenance.jobschedule.

    Numeric fields are forced through int() before interpolation; the
    remaining fields are inserted as quoted strings.
    """
    query = f"""
    INSERT INTO Maintenance.jobschedule
    (project_id, microservice_id, schedule_id, job_id, job_name,
    start_at, stop_at, repeat_type, cron_date)
    VALUES
    ({int(project_id)}, {int(microservice_id)}, {int(schedule_id)},
    {int(job_id)}, '{job_name}', '{start_at}', '{stop_at}',
    {int(repeat_type)}, '{cron_date}');
    """
    if self.set_query(query):
        self.commit_query()
    else:
        lprint_exception('failed to insert data')
def set_templates(self, templates):
    """Reshape ``templates`` and push them to the matcher service via gRPC.

    gRPC errors are logged and swallowed; success is logged in the else path.
    """
    payload = Struct()
    payload.update({'templates': self._reshape_templates(templates)})
    request_data = template_matcning_pb2.Set(template_data=payload)
    try:
        self.stub.set_templates(request_data)
    except grpc.RpcError as e:
        lprint_exception(e)
    else:
        lprint("success to send. gRPC status: ", grpc.StatusCode.OK)
def get_matching_result(self, picture_path_list):
    """Request matching results for ``picture_path_list`` from the gRPC service.

    Returns:
        The 'data' entry of the response dict, or None on invalid input
        or gRPC failure.
    """
    if picture_path_list is None:
        # CONSISTENCY FIX: use the module's lprint logger like the rest of
        # the file instead of a bare print().
        lprint("Error: npy is None")
        return None
    request_data = template_matcning_pb2.Matching(
        picture_file_list=picture_path_list)
    try:
        res = self.stub.get_matching_result(request_data)
        data = MessageToDict(res.data_dict)
    except grpc.RpcError as e:
        lprint_exception(e)
        return None
    return data.get('data')
def search_mount_point(self, path, depth=0):
    """Recursively collect mount points under ``path``.

    Descends at most MAX_SEARCH_DEPTH directory levels. Returns a list of
    mount-point paths (possibly empty).
    """
    mounts = []
    try:
        for entry in os.scandir(path):
            if os.path.ismount(entry.path):
                mounts.append(entry.path)
            elif entry.is_dir() and depth < MAX_SEARCH_DEPTH:
                mounts.extend(
                    self.search_mount_point(entry.path, depth=depth + 1))
    except PermissionError as e:
        lprint_exception(e)
        # BUG FIX: previously returned [] here, discarding any mount points
        # already found before the permission error; keep partial results.
    return mounts
def createCronJob(self, job_id, name, image, schedule, ms_number, envoy_name):
    """Create a CronJob in NAMESPACE from the yaml/cronjob.yaml template.

    Fills in the cronjob/container name, the docker image (``<image>:latest``),
    the cron ``schedule``, the MS_NUMBER/AION_HOME/JOB_ID env vars, and the
    envoy ConfigMap volume name, then submits the body to the k8s API.
    Failures are logged and swallowed.
    """
    body = None
    with open('yaml/cronjob.yaml', 'r') as f:
        body = yaml.full_load(f)
    aion_home = os.environ.get("AION_HOME")
    if body:
        # set cronjob name (both metadata and the single container)
        body['metadata']['name'] = name
        body['spec']['jobTemplate']['spec']['template']['spec'][
            'containers'][0]['name'] = name
        # set docker image name — always the :latest tag
        body['spec']['jobTemplate']['spec']['template']['spec'][
            'containers'][0]['image'] = f'{image}:latest'
        # set execute schedule as cron time format
        body['spec']['schedule'] = schedule
        # set env: overwrite MS_NUMBER/AION_HOME in place, append JOB_ID
        env = body['spec']['jobTemplate']['spec']['template']['spec'][
            'containers'][0]['env']
        for row in env:
            if row.get('name') == 'MS_NUMBER':
                row['value'] = str(ms_number)
            if row.get('name') == 'AION_HOME':
                row['value'] = aion_home
        env.append({'name': 'JOB_ID', 'value': str(job_id)})
        # set envoy configmap name on every configMap-backed volume
        volumes = body['spec']['jobTemplate']['spec']['template']['spec'][
            'volumes']
        for row in volumes:
            if row.get('configMap'):
                row['configMap']['name'] = envoy_name
        try:
            # NOTE(review): api_response is unused; success is only logged.
            api_response = self.api_instance.create_namespaced_cron_job(
                NAMESPACE, body)
            lprint(
                f"BatchV1beta1Api->create_namespaced_cron_job for {name} is successful."
            )
        except client.rest.ApiException as e:
            lprint_exception(
                "Exception when calling BatchV1beta1Api->create_namespaced_cron_job: %s\n"
                % e)
def createEnvoy(self, name):
    """Create a ConfigMap ``name`` holding the contents of yaml/envoy.yaml.

    Does nothing if the template file is empty; API errors are logged
    and swallowed.
    """
    with open('yaml/envoy.yaml', 'r') as f:
        data = f.read()
    if not data:
        return
    body = {'metadata': {'name': name}, 'data': {'envoy.yaml': data}}
    try:
        self.api_instance.create_namespaced_config_map(NAMESPACE, body)
    except client.rest.ApiException as e:
        lprint_exception(
            "Exception when calling CoreV1Api->create_namespaced_config_map: %s\n"
            % e)
    else:
        lprint(
            f"CoreV1Api->create_namespaced_config_map for {name} is successful."
        )
def getCurrentCronjobs(self, microservice_name):
    """Return the names of all cronjobs (any namespace) whose name contains
    ``microservice_name``.

    API failures are logged and an empty (or partial) list is returned.
    """
    match_list = []
    try:
        api_response = self.api_instance.list_cron_job_for_all_namespaces()
        lprint(
            f"BatchV1beta1Api->list_cron_job_for_all_namespaces is successful."
        )
        lprint(api_response)
        for item in api_response.items:
            lprint(item)
            cronjob_name = item.metadata.name
            if microservice_name in cronjob_name:
                match_list.append(cronjob_name)
    except client.rest.ApiException as e:
        lprint_exception(
            "Exception when calling BatchV1beta1Api->list_cron_job_for_all_namespaces: %s\n"
            % e)
    return match_list