def update_goal():
    """Create or update the current user's weight/health goals from form data.

    Reads 'weightgoal' and 'goodgoal' from the POSTed form. 'goodgoal' arrives
    as a percentage and is stored as a fraction (value * .01). If the user has
    no Goals row yet, one is inserted (both values required); otherwise the
    existing row is updated.
    """
    weightgoal = request.form['weightgoal']
    try:
        # Health goal is entered as a percentage; persist it as a fraction.
        goodgoal = float(request.form['goodgoal']) * .01
    except ValueError as error:
        # Fixed: was logging.debut (typo) — would raise AttributeError
        # instead of logging the failed conversion.
        logging.debug('PROFILE_POST: Failed weight update with error {}'.format(error))
        flash('Need weight and health in number format.', 'danger')
        return

    testweight = session.query(Goals).filter_by(user_id=current_user.id).first()
    if not testweight:
        # No existing goals: require both values before inserting a new row.
        if weightgoal and goodgoal > 0:
            newgoals = Goals(user_id=current_user.id,
                             weight_goal=weightgoal,
                             health_goal=goodgoal)
            session.add(newgoals)
            session.commit()
            return redirect(url_for('profile'))
        else:
            flash('Need both a weight goal and health percentage.', 'danger')
    else:
        # Existing goals: update both columns in a single UPDATE statement
        # (the original issued two separate queries for the same row).
        session.query(Goals) \
            .filter_by(user_id=current_user.id) \
            .update({"weight_goal": weightgoal, "health_goal": goodgoal})
        session.commit()
def displayImageByFileName(fileName, panel):
    """Display the JPEG named fileName (located under PHOTOS_PATH) on panel.

    Silently returns if the file does not exist or does not carry a
    .jpg/.JPG extension.
    """
    # os.path.join instead of manual '/%s' concatenation.
    picturePath = os.path.join(PHOTOS_PATH, fileName)
    if not os.path.isfile(picturePath):
        # Fixed: was logging.debut (typo, AttributeError) with message
        # 'File donot exist!'.
        logging.debug('File does not exist!')
        return
    # endswith accepts a tuple — one call covers both extensions.
    if not fileName.endswith(('.jpg', '.JPG')):
        return
    changeImageOnPanel(picturePath, panel)
def try_to_recover_task(self, task, task_id):
    """Try to reattach to an already-running worker process for (task, task_id).

    Reads the process logging file written by the worker, verifies the
    recorded pid still runs the exact same worker command line from the same
    working directory, and if so registers it under self.running_tasks and
    reserves its resources.

    :return: True if the running task was recovered, False otherwise.
    """
    process_logging_filename = task.get_process_logging_path(task_id)
    if not os.path.isfile(process_logging_filename):
        # Nothing to do here
        return False

    # Check if task is already running
    try:
        # The worker wrote a Python-literal dict (pid, requested resources).
        with open(process_logging_filename) as f:
            d = literal_eval(f.read())
        pid = d['pid']
        process = psutil.Process(pid)

        # Recover instance
        rqmt = d['requested_resources']
        logpath = os.path.relpath(task.path(gs.JOB_LOG_ENGINE))
        call_with_id = gs.SIS_COMMAND + [
            'worker', os.path.relpath(task.path()), task.name(), str(task_id)]
        call_with_id += ['--redirect_output']

        name = task.task_name()
        task_name = task.name()
        task_instance = TaskQueueInstance(
            call_with_id, logpath, rqmt, name, task_name, task_id)

        # The pid must still execute the exact same worker command line,
        # otherwise the pid was recycled by an unrelated process.
        if call_with_id != process.cmdline()[1:]:
            logging.debug('Job changed, ignore this job: %i %s %s'
                          % (pid, process.cmdline(), task_instance.call))
            return False

        # ... and it must run from this working directory.
        # Fixed: was logging.debut (typo) — this branch would have raised
        # AttributeError instead of logging and returning False.
        if os.path.abspath(os.getcwd()) != process.cwd():
            logging.debug('Job changed, ignore this job: %i %s %s'
                          % (pid, os.getcwd(), process.cwd()))
            return False

        with self.running_tasks as running_tasks:
            name = (task_instance.name, task_id)
            running_tasks[name] = (process, task_instance)
            self.reserve_resources(rqmt)
        logging.debug('Loaded job: %i %s %s'
                      % (pid, process.cmdline(), task_instance.call))
        return True
    except Exception as e:
        # Best-effort recovery: any failure means we simply don't reattach.
        logging.debug('Failed to load running job: %s' % e)
        return False
class PartnerAPI:
    """Client for a partner tracking API: builds target URLs, calls the API,
    and parses pickled API responses into per-shipment DataFrames."""

    def __init__(self, partner_name, url, id_batches, num_threads=35):
        self.name = partner_name
        self.url = url
        # Dict: key = batch name, value = list of tracking numbers.
        self.batches = id_batches
        self.num_threads = num_threads

    def partner_url(self, tracking_ids):
        """
        Input: List of tracking ids
        Output: Target API url
        Can be called alone or within a queue.
        """
        # Fixed: original iterated the values but then did
        # `target_url += tracking_ids[i]`, indexing the list with a string
        # (TypeError). The intent is simply to append every id to the base url.
        return self.url + ''.join(tracking_ids)

    def call_api(self, target_url):
        """
        Input: Target API url (with shipment info)
        Output: API returns a list of json events, one for each shipment.
        Can be called alone or within a queue.
        """
        with requests.Session() as s:
            retries = Retry(total=2, backoff_factor=0.3)
            s.mount(target_url, requests.adapters.HTTPAdapter(max_retries=retries))
            # Fixed: original called s.get(target) — undefined name.
            results = s.get(target_url)
        # No explicit s.close(): the with-block already closes the session.
        return results.json()

    def parse_data(self, api_response_fn):
        """
        Input: Filename of api response pickle
        Output: List of dataframes of shipment events
        Can be called alone or within a queue.
        """
        # Fixed: pickle files must be opened in binary mode.
        with open(api_response_fn, 'rb') as file:
            shipments = pickle.load(file)  # List of shipments

        parsed = []
        for parcel in shipments:
            # Fixed: tracking_id/events_df could be unbound if the very first
            # statements of the try-block raised; initialise them up front.
            tracking_id = None
            events_df = pd.DataFrame()
            try:
                tracking_id = parcel['Tracking_ID']
                status = parcel['Parcel_Status']
                # Replaces the inner try/bare-except KeyError dance.
                events = parcel.get('Event_Scans')
                if events:
                    events_df = pd.DataFrame(events)
                    events_df['TrackingID'] = tracking_id
                    events_df['Status'] = status
                    # Parse Datetime. Fixed: original tested
                    # `'T' in events['Datetime']` (indexing a list with a
                    # string) and parsed `events_df.TimeStamp` while storing
                    # into 'Datetime' — presumed intent is to parse the
                    # 'Datetime' column, ISO-style when it contains 'T'.
                    # TODO(review): confirm column name against a real payload.
                    if 'T' in str(events[0].get('Datetime', '')):
                        events_df['Datetime'] = events_df['Datetime'].apply(
                            lambda x: dt.strptime(x[:19], '%Y-%m-%dT%H:%M:%S'))
                    else:
                        events_df['Datetime'] = pd.to_datetime(events_df['Datetime'])
                else:
                    # No "Event_Scans" found.
                    # Fixed: logging.debut typo and an unterminated f-string
                    # key (parcel['Tracking_ID]) which was a SyntaxError.
                    logging.debug(
                        "No event scans found for %s", parcel['Tracking_ID'])
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still propagates.
                events_df = pd.DataFrame()
                events_df['Status'] = 'RRD API Response: ' + parcel['Message']
                logging.debug(
                    f"""Error parsing API for shipment in {api_response_fn}.""")
            if tracking_id:
                parsed.append(events_df)
        return parsed
time.sleep(4) elif pyautogui.locateOnScreen('ok2.png'): logging.debug('"ok2.png located on screen') clickpoint = pyautogui.center(pyautogui.locateOnScreen('ok2.png')) if clickpoint[0] > window_edge: logging.debug('"ok2.png" is in operated screen area') pyautogui.click(clickpoint) logging.info( 'Point clicked at coord: %s , sleeping for 4 seconds' % str(clickpoint)) time.sleep(4) elif pyautogui.locateOnScreen('captcha.png') != None: logging.info('Captcha located on screen') clickpoint = pyautogui.center(pyautogui.locateOnScreen('captcha.png')) if clickpoint[0] > window_edge: logging.debut('captcha in operated screen area') if captcha_fail > 3 and not captcha_sent: logging.info('Sending captcha to C&C, captcha_fail = %d' % captcha_fail) parted_scrn = scrn.crop((window_edge, 0, width, height)) parted_scrn.save('tmp_parted.png') #send captcha to other humans to solve it # commcon.send_to_cc('tmp_parted.png', categ='captcha') #### #the above function will return after dealing with the captcha #so no need to sleep etc #captcha_sent=True #time.sleep(300) continue elif captcha_fail > 2: logging.debug( 'captcha_fail = %d, user being informed of captcha' %