def longjob(pk):
    for i in range(20):
        print 'longjob', i
        sleep(0.5)
        current_task.update_state(state='PROGRESS',
                                  meta={'progress': (i * 100.0) / 20})
    current_task.update_state(state='PROGRESS', meta={'progress': 100})
    return 'all done'
def update_state(self, state):
    """
    Update the state of the running task.

    The following metadata is updated:
    - elapsed: time elapsed so far.
    - c1: candidate itemsets.
    - total_set: frequent itemsets.
    - rules: rules ordered by confidence and support.
    """
    if current_task:
        current_task.update_state(state=state, meta={
            'elapsed': find_now(self.start),
            'c1': json.dumps(self.c1, cls=SetEncoder),
            'total_set': json.dumps(self.total_set, cls=SetEncoder),
            'rules': json.dumps(
                sorted(self.rules,
                       key=operator.itemgetter(1, 2),
                       reverse=True))
        })
def run_rscript(script, args, cwd=app.config['R_SCRIPT_PATH']):
    current_task.update_state(state='PENDING', meta={'stage': 'running report'})
    cmd_line = ['Rscript', os.path.join(app.config['R_SCRIPT_PATH'], script)]
    cmd_line.extend(args)
    print("Running Rscript: '%s'\n" % ' '.join(cmd_line))
    proc = subprocess.Popen(cmd_line, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() waits for the process to exit; calling wait() first can
    # deadlock when the child fills the PIPE buffers.
    (stdout, stderr) = proc.communicate()
    print(stdout.decode("utf-8"))
    if proc.returncode != 0:
        msg = str(stderr.decode("utf-8"))
        if "Execution halted" in msg:
            print("Got error in Rscript:\n" + msg)
            msg = msg.split('\n')
            errors = []
            for line in msg:
                if 'rror' in line:
                    errors.append(line)
            raise BadRequest('Error running report: %s' % '\n'.join(errors))
    return True
def add(n):
    result = 0
    for adder in range(1, n):
        result += adder
        status = 'added up to {}, the result is {}'.format(adder, result)
        current_task.update_state(state="PROGRESS", meta={'status': status})
    return result
def process_update(progress, percent, name, is_celery=False):
    if is_celery is True:
        current_task.update_state(state=progress, meta={
            c.CELERY_PRO_PERCENT: percent,
            c.PRO_NAME: name
        })
def file_downloader(file_url, file_location):
    resp = requests.get(file_url, stream=True)
    total = resp.headers.get('content-length')
    with open(file_location, 'wb') as f:
        if total is None:
            f.write(resp.content)
        else:
            downloaded = 0
            total = int(total)
            total_file_size = str(round((int(total) / 1024) / 1024, 2)) + ' MB'
            for data in resp.iter_content(
                    chunk_size=max(int(total / 1000), 1024 * 1024)):
                downloaded += len(data)
                f.write(data)
                done_percent = (100 * downloaded) / total
                done_percent_str = str(round(done_percent, 2)) + ' %'
                done_size = (done_percent * total) / 100
                done_size_str = str(round(
                    (int(done_size) / 1024) / 1024, 2)) + ' MB'
                rem_size = ((100 - done_percent) * total) / 100
                rem_size_str = str(round(
                    (int(rem_size) / 1024) / 1024, 2)) + ' MB'
                current_task.update_state(state='PROGRESS', meta={
                    'file_name': file_location,
                    'status': done_percent_str,
                    'file_size': total_file_size,
                    'downloaded_file_size': done_size_str,
                    'remaining_file_size': rem_size_str,
                })
def launch_sync_rando(*args, **kwargs):
    """
    Celery shared task - sync rando command.
    """
    if not os.path.exists(settings.SYNC_RANDO_ROOT):
        os.mkdir(settings.SYNC_RANDO_ROOT)
    print 'Sync rando started'
    try:
        current_task.update_state(state='PROGRESS', meta={
            'name': current_task.name,
            'current': 5,
            'total': 100,
            'infos': u"{}".format(_(u"Init sync ..."))
        })
        call_command('sync_rando', settings.SYNC_RANDO_ROOT,
                     url=kwargs.get('url'), verbosity='2',
                     task=current_task, **settings.SYNC_RANDO_OPTIONS)
    except Exception:
        raise
    print 'Sync rando ended'
    return {
        'name': current_task.name,
    }
def get_followers_n(username, twitter_token, twitter_secret, user_token, user_secret):
    print("tasks.py")
    api = twitter.Api(consumer_key=twitter_token, consumer_secret=twitter_secret,
                      access_token_key=user_token, access_token_secret=user_secret)
    user = api.GetUser(screen_name=username)
    total = user.followers_count
    followers = api.GetFollowers()
    print(user)
    followers = [i.AsDict() for i in followers]
    user_name = []
    followers_n = []
    # enumerate gives a numeric index for the progress calculation
    for idx, follower in enumerate(followers, start=1):
        user = api.GetUser(screen_name=follower['screen_name'])
        user_name.append(user)
        print(user)
        n = user.followers_count
        print(n)
        followers_n.append(n)
        process_percent = int(100 * float(idx) / float(total))
        current_task.update_state(state='PROGRESS',
                                  meta={'process_percent': process_percent})
    if len(user_name) == len(followers_n):
        dicts = {}
        for i in range(len(user_name)):
            dicts[user_name[i]] = followers_n[i]
        return dicts
    else:
        return "Can't fetch followers"
def long_task():
    a = 1
    while True:
        a = a + 1
        sleep(0.2)
        current_task.update_state(state="PROGRESS",
                                  meta={'current': a, 'total': 100})
def run_yolo(self, filename: str) -> None:
    filepath = Path(IMAGE_DIRECTORY) / self.request.id / filename
    current_task.update_state(state="PROGRESS", meta={"progress": 0.1})

    # Get detections from image
    detections = detect_objects(yolo_net, yolo_labels, yolo_layers, yolo_colors, filepath)
    current_task.update_state(state="PROGRESS", meta={"progress": 0.7})
    detections = list(filter(lambda d: d["label"] in VEHICLE_CLASSES, detections))

    # Save image with detections drawn
    detections_img = draw_detections(filepath, detections)
    save_path = filepath.parent / "detections.jpg"
    cv.imwrite(str(save_path), detections_img)
    detections_img_sm = image_resize(detections_img)
    save_path = filepath.parent / "thumbs" / "detections.jpg"
    save_path.parent.mkdir()
    cv.imwrite(str(save_path), detections_img_sm)

    # Save image of cropped detections with JSON with the details of detection
    save_directory = filepath.parent / "objects"
    if not save_directory.exists():
        save_directory.mkdir()
        (save_directory / "thumbs").mkdir()
    for num, obj in enumerate(detections):
        (save_directory / f"{num + 1}.json").write_text(json.dumps(obj))
        image = crop_detection(filepath, obj)
        cv.imwrite(str(save_directory / f"{num + 1}.jpg"), image)
        # TODO fix cv2 error from some non-jpg
        image_sm = image_resize(image)
        cv.imwrite(str(save_directory / "thumbs" / f"{num + 1}.jpg"), image_sm)
    return detections
def runmodules(hostlist=""): import datetime import django, os, sys os.environ["DJANGO_SETTINGS_MODULE"] = "Kraken.settings" sys.path.append("/opt/Kraken") django.setup() from Web_Scout.models import Hosts from importlib import import_module import datetime if not hostlist: hostlist = Hosts.objects.exclude(Module__exact='') total_count = len(hostlist) LogKrakenEvent('Celery', 'Running modules on ' + str(total_count) + ' hosts.', 'info') start_time = datetime.datetime.now() jobs = group(runmodule.s(host.HostID) for host in hostlist) result = jobs.apply_async() while not result.ready(): print 'Failed Tasks? ' + str(result.failed()) print 'Waiting? ' + str(result.waiting()) print 'Completed: ' + str(result.completed_count()) print 'Total: ' + str(total_count) process_percent = int((result.completed_count() / total_count) * 100) sleep(.1) print 'Percentage Complete: ' + str(process_percent) + '%' current_task.update_state(state='PROGRESS', meta={'process_percent': process_percent }) sleep(5) end_time = datetime.datetime.now() total_time = end_time - start_time LogKrakenEvent('Celery', 'Mass Module Execution Complete. Elapsed time: ' + str(total_time) + ' to test ' + str(total_count) + ' interfaces', 'info')
def tsk_track_category(category_id, category_name):
    """
    :param category_id: id of the category to track
    :param category_name: display name of the category
    :return: status message, or None when an update is started
    """
    # check if category has been tracked in last 24h
    category_playlists = redis_get_json(f'playlist_{category_id}')
    if not category_playlists:
        # fill er up
        pass
    else:
        try:
            last = category_playlists['last_updated']
            if (datetime.today() -
                    datetime.strptime(last, "%Y-%m-%d %H:%M:%S")).days < 1:
                return "No need to update, already run in last 24h"
        except (KeyError, ValueError):
            pass
    # need to start update
    playlists = tracker.get_playlists_for_category(category_id)
    current_task.update_state(state='LOAD PLAYLISTS', meta={
        'status': f'loading playlists ({len(playlists)})',
        'category': f'{category_name}'
    })
    task_name = make_task_name('tsk_track_playlist')
    for playlist in playlists:
        celery_app.send_task(task_name, args=[playlist['id']])
        # need to attach as child when done
def successor(self, state):
    y = self.symetrie / 1000000
    current_task.update_state(state='PROGRESS',
                              meta={'current': y, 'total': 100})
    for valeur1 in state:
        temp = state[:]
        temp = temp[0:temp.index(valeur1)] + temp[temp.index(valeur1) + 1:len(temp)]
        for valeur2 in temp:
            if len(valeur2) == 1:
                if not valeur1 + valeur2 in self.dico:
                    self.dico[valeur1 + valeur2] = 1
                    check = possible(valeur1, valeur2, self.taille)
                    if check[0]:
                        newmove = temp[:]
                        newmove = newmove[0:newmove.index(valeur2)] + newmove[newmove.index(valeur2) + 1:len(newmove)]
                        newmove = newmove + (valeur1 + valeur2,)
                        if check[1] > self.taille:
                            self.taille = check[1]
                            self.solution = check[2]
                        elif check[1] == self.taille:
                            for seq in check[2]:
                                self.solution.append(seq)
                        etape = valeur1 + ' ' + '+' + ' ' + valeur2 + ' ' + '=' + ' ' + valeur1 + valeur2
                        yield (etape, newmove)
                else:
                    x = len(valeur1 + valeur2)
                    self.symetrie += ast(x)
            else:
                x = len(valeur1 + valeur2)
                self.symetrie += ast(x)
def execute_testcases(submission_file, submission_id, problem):
    base_dir = Path(settings.MEDIA_ROOT).resolve()
    source_path = base_dir / submission_file
    testcases = TestCase.objects.filter(problem=problem)
    response = {}
    for testcase in testcases:
        testcase_path = base_dir / testcase.testcase.name
        output_path = base_dir / 'user_output' / (str(submission_id) + str(testcase.id))
        engine = Engine(source_path=source_path,
                        testcase_path=testcase_path,
                        output_path=output_path)
        try:
            id = testcase.id
            engine.process()
        except Engine.CompileError:
            response[id] = 'error'
        except Engine.TimeOut:
            response[id] = 'TLE'
        else:
            if engine.check_output(output_path):
                response[id] = 'AC'
            else:
                response[id] = 'WA'
        current_task.update_state(state="PROGRESS", meta=response)
    return response
def compile_sketch_task(task_id, data):
    try:
        current_task.update_state(state='PROGRESS',
                                  meta={'current_process': 'Saving Files'})
        filenames = saveFiles(data)
        current_task.update_state(state='PROGRESS',
                                  meta={'current_process': 'Starting Compiling'})
        output = CompileINO(filenames)
        if isinstance(output, bool):
            current_task.update_state(state='FAILURE', meta={
                'exc_type': 'Compilation Error',
                'exc_message': 'Server Error'
            })
            return {'error': True}
        else:
            current_task.update_state(state='PROGRESS',
                                      meta={'current_process': 'Done'})
            return output
    except Exception as e:
        current_task.update_state(state='FAILURE', meta={
            'exc_type': type(e).__name__,
            'exc_message': traceback.format_exc()
        })
        print(traceback.format_exc())
        return {'error': True}
def get_student_course_stats_base(request, course, type="grades"):
    """
    Called by get_student_course_stats and get_student_problem_stats.
    Gets a list of users in a course, and then computes grades for them.
    request - a mock request (using RequestDict)
    course - a string course id
    type - whether to get weighted or unweighted student grades.
           If "grades", weighted grades are computed.
    """
    fs, db = common.get_db_and_fs_cron(common.student_course_stats_stub)
    course_obj = get_course_with_access(request.user, course, 'load', depth=None)
    users_in_course = StudentModule.objects.filter(course_id=course).values('student').distinct()
    users_in_course_ids = [u['student'] for u in users_in_course]
    log.debug("Users in course count: {0}".format(len(users_in_course_ids)))
    courseware_summaries = []
    for i in xrange(0, len(users_in_course_ids)):
        try:
            user = users_in_course_ids[i]
            current_task.update_state(state='PROGRESS',
                                      meta={'current': i, 'total': len(users_in_course_ids)})
            student = User.objects.using('remote').prefetch_related("groups").get(id=int(user))
            model_data_cache = None
            if type == "grades":
                grade_summary = grades.grade(student, request, course_obj, model_data_cache)
            else:
                grade_summary = grades.progress_summary(student, request, course_obj, model_data_cache)
            courseware_summaries.append(grade_summary)
        except:
            log.exception("Could not generate data for {0}".format(users_in_course_ids[i]))
    return courseware_summaries, users_in_course_ids
def update_status(state, meta=None):
    """
    Update the task status if running under a worker, else do nothing.
    """
    direct_call = current_task.request.id is None
    if not direct_call:
        current_task.update_state(state=state, meta=meta)
def acceptHosts(data):
    log.debug("Inside acceptHosts Async Task %s" % data)
    current_task.update_state(state='STARTED')
    if 'nodes' in data:
        nodelist = data['nodes']
    else:
        log.info("Node List is empty")
        raise Exception(
            data, "Node List is empty, Accept Hosts failed")
    minionIds, failedNodes = usm_rest_utils.setup_transport_and_update_db(
        nodelist)
    successNodes = [item for item in nodelist if item not in failedNodes]
    # Discover the disks from the nodes.
    if not usm_rest_utils.discover_disks(successNodes):
        log.critical("Discovery of disks failed")
    if failedNodes:
        return {'state': 'FAILURE',
                'failednodes': str(failedNodes),
                'reason': 'Accept Failed for few hosts'}
    return {'state': 'SUCCESS'}
def update_async_status(self, state, meta):
    if not current_task:
        return
    current_task.update_state(
        state=state,
        meta=meta,
    )
def update_progress(task, percent):
    print(task, percent)
    current_task.update_state(state='PROGRESS', meta={
        'task': task,
        'percent': percent
    })
def createGlusterVolume(data):
    log.debug("Inside createGlusterVolume Async Task %s" % data)
    current_task.update_state(state='STARTED')
    if 'bricks' in data:
        bricklist = data['bricks']
        del data['bricks']
    # Return from here if bricklist is empty
    if len(bricklist) == 0:
        log.info("Brick List is empty, Volume creation failed")
        raise usm_rest_utils.VolumeCreationFailed(
            bricklist, "Brick List is empty, Volume creation failed")
    hostlist = Host.objects.filter(cluster_id=str(data['cluster']))
    # Prepare the disks for brick creation
    failed = usm_rest_utils.create_gluster_brick(bricklist)
    log.critical("Brick creation failed for bricks: %s" % str(failed))
    # Remove the failed bricks from bricklist
    bricks = [item for item in bricklist if item not in failed]
    # Return from here if bricks are empty
    if len(bricks) == 0:
        log.info("Brick List is empty, Brick creation failed")
        raise usm_rest_utils.VolumeCreationFailed(
            bricklist, "Brick Creation failed")
    log.debug("Creating the volume with bricks %s", str(bricks))
    # create the Volume
    try:
        rc = usm_rest_utils.create_gluster_volume(data, bricks, hostlist)
        if rc is False:
            log.debug("Creating the volume failed")
            raise usm_rest_utils.VolumeCreationFailed(data, str(bricks))
    except usm_rest_utils.VolumeCreationFailed, e:
        log.exception(e)
        raise
def update_progress(self, i, file_path):
    progress_percent = int(100 * float(i) / float(self.files_count))
    current_task.update_state(state='PROGRESS', meta={
        'status': progress_percent,
        'file': os.path.basename(file_path)
    })
def process_item(id: int, quantity: int, price: float) -> str:
    # for i in range(1, 11):
    current_task.update_state(state='PROGRESS', meta={'process_percent': 10})
    print(333333333333)
    return f"finished processing item: {id} | {quantity} | {price}"
def download_task(url, savePath, taskMeta=None):
    try:
        import urllib2
        file_name = url.split('/')[-1]
        u = urllib2.urlopen(url)
        header = u.info().getheader('Content-Length')
        file_size = int(header.strip())
        if os.path.exists(os.path.join(savePath, file_name)) and \
                int(os.stat(os.path.join(savePath, file_name)).st_size) == file_size:
            print "file exists and downloaded: " + file_name
            return [os.path.join(savePath, file_name), taskMeta]
        f = open(os.path.join(savePath, file_name), 'wb')
        print "Downloading: %s Bytes: %s" % (file_name, file_size)
        file_size_dl = 0
        block_sz = 81920
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            current_task.update_state(state='PROGRESS',
                                      meta={'current': int(file_size_dl * 100 / file_size),
                                            'total': 100})
            file_size_dl += len(buffer)
            f.write(buffer)
        f.close()
    except:
        traceback.print_exc()
        taskMeta.state = State.DOWNLOADFAILED
    else:
        # only mark as downloaded when no exception occurred
        taskMeta.state = State.DOWNLOADED
    return [os.path.join(savePath, file_name), taskMeta]
def update_task_info(state, meta=None):
    """
    If running as a task, update the task state, else return directly.
    """
    if current_task.request.id is not None:
        current_task.update_state(
            state=(state[:35] + '..' + state[-10:]) if len(state) > 49 else state,
            meta=meta)
def scheduled_task() -> str:
    print("Task started")
    for i in range(1, 11):
        sleep(1)
        current_task.update_state(state='PROGRESS',
                                  meta={'process_percent': i * 10})
    return {"message": "Hello world!!!"}
def test_celery(word: str) -> str:
    print("Task started")
    for i in range(1, 11):
        sleep(1)
        current_task.update_state(state='PROGRESS',
                                  meta={'process_percent': i * 10})
    return {"message": word}
def slow_proc():
    NTOTAL = 10
    for i in range(NTOTAL):
        time.sleep(random.random())
        current_task.update_state(state='PROGRESS',
                                  meta={'current': i, 'total': NTOTAL})
    return 999
def commit_rows(file_type, rows, chunk_size):
    bulk_mgr = BulkCreateManager(chunk_size=chunk_size)
    if file_type == 'payment':
        total_rows = len(rows)
        i = 1
        for row in rows:
            payment_detail = row['Details'].split()
            customer_name = ''.join(payment_detail[4:])
            if payment_detail[0].lower() == 'credit':
                payment_mode = PaymentMode(payment_type=1,
                                           payment_source=payment_detail[2],
                                           amount=row['Paid In'])
                payment_mode.save()
            elif payment_detail[0].lower() == 'mpesa':
                payment_mode = PaymentMode(payment_type=0,
                                           payment_source=payment_detail[2],
                                           amount=row['Paid In'])
                payment_mode.save()
            bulk_mgr.add(
                PaymentDetail(payment_reference=row['Receipt No.'],
                              customer_name=customer_name,
                              payment_mode=payment_mode,
                              account_number=row['A/C No.']))
            percent = int(100 * float(i) / float(total_rows))
            i = i + 1
            current_task.update_state(state='PROGRESS',
                                      meta={'process_percent': percent})
        # flush any objects still buffered by the bulk manager
        bulk_mgr.done()
    return True
def run_ug(user_group):
    """
    Main celery task for running tests
    :param user_group: User group object containing all options
    :return: None
    """
    buff[0] = 0
    buff[1] = 0
    script_module = load_script(user_group.script_file)
    threads = []
    fh = logging.FileHandler(user_group.output_dir + 'results.csv')
    logger.addHandler(fh)
    current_task.update_state(state='RUNNING',
                              meta={'trans': buff[0], 'errors': buff[1]})
    for i in range(user_group.thread_num):
        space = float(user_group.rampup) / float(user_group.thread_num)
        if i > 0:
            time.sleep(space)
        agent_thread = Agent(user_group.process_num, i, user_group.start_time,
                             user_group.run_time, user_group.group_name,
                             script_module, user_group.script_file,
                             user_group.console_logging)
        agent_thread.daemon = True
        threads.append(agent_thread)
        agent_thread.start()
    while len([t for t in threads if t.is_alive()]) > 0:
        current_task.update_state(state='RUNNING',
                                  meta={'trans': buff[0], 'errors': buff[1]})
        time.sleep(1)
def flickr(id, lat, lon):
    FLICKR_API_KEY = keys['keys']['flickr_api_key']
    FLICKR_SECRET_API_KEY = keys['keys']['flickr_api_key']
    device1 = Device.objects.get(id=id)
    flickr = flickrapi.FlickrAPI(FLICKR_API_KEY, FLICKR_SECRET_API_KEY)
    try:
        photo_list = flickr.photos.search(api_key=FLICKR_API_KEY, lat=lat, lon=lon,
                                          accuracy=16, format='parsed-json',
                                          per_page=100, extras='url_l,geo',
                                          has_geo=1, sort='newest')
    except Exception as e:
        print(e.args)
    total = 100
    for counter, photo in enumerate(photo_list['photos']['photo']):
        if 'url_l' in photo:
            flickr_db = FlickrNearby(device=device1,
                                     lat=str(photo['latitude']),
                                     lon=str(photo['longitude']),
                                     title=photo['title'],
                                     url=photo['url_l'])
            flickr_db.save()
            print(counter)
            current_task.update_state(state='PROGRESS',
                                      meta={'current': counter,
                                            'total': total,
                                            'percent': int((float(counter) / total) * 100)})
    return {'current': total, 'total': total, 'percent': 100}
def run_wpod(self, filename: str, makePrediction: bool = False) -> None:
    filepath = Path(IMAGE_DIRECTORY) / self.request.id / filename
    current_task.update_state(state="PROGRESS", meta={"progress": 0.1})
    plateImg, cor = get_plate(wpod_net, filepath)  # XXX can raise AssertionError if no plate is found
    vehicleImg = draw_box(filepath, cor)
    # XXX Currently only handling one plate image from array of found images
    img_float32 = np.float32(plateImg[0])
    plateImg = cv.cvtColor(img_float32, cv.COLOR_RGB2BGR)
    img_float32 = np.float32(vehicleImg)
    vehicleImg = cv.cvtColor(img_float32, cv.COLOR_RGB2BGR)
    plateFile = filepath.parent / "plate.jpg"
    cv.imwrite(str(plateFile), plateImg * 255)
    cv.imwrite(str(filepath.parent / "vehicle.jpg"), vehicleImg * 255)
    # plateImgSm = image_resize(plateImg)
    # cv.imwrite(str(plateFile.parent / "thumbs" / "plate.jpg"), plateImgSm * 255)
    vehicleImgSm = image_resize(vehicleImg)
    (filepath.parent / "thumbs").mkdir()
    cv.imwrite(str(filepath.parent / "thumbs" / "vehicle.jpg"), vehicleImgSm * 255)
    if makePrediction:
        prediction = get_prediction(ocr_net, ocr_labels, str(plateFile))
        return prediction
def create_schema(total_entries, pk):
    schema = Schema.objects.get(pk=pk)
    fields = SchemaField.objects.filter(schema=schema)
    for i in range(total_entries):
        for field in fields:
            value = ""
            if field.type == 'Name':
                value = get_random_string(random.randint(3, 20), string.ascii_letters) + ' ' + \
                        get_random_string(random.randint(3, 20), string.ascii_letters)
            elif field.type == 'Email':
                value = '*****@*****.**' % get_random_string(20, string.ascii_letters)
            elif field.type == 'Age':
                value = random.randint(1, 120)
            elif field.type == 'Phone':
                n = '0000000000'
                while '9' in n[3:6] or n[3:6] == '000' or n[6] == n[7] == n[8] == n[9]:
                    n = str(random.randint(10 ** 9, 10 ** 10 - 1))
                value = n[:3] + '-' + n[3:6] + '-' + n[6:]
            elif field.type == 'Range Integer':
                value = random.randint(field.start, field.end)
            row = FieldRow.objects.create(data=value, field=field)
            row.save()
        current_task.update_state(state='PROGRESS',
                                  meta={'current': i,
                                        'total': total_entries,
                                        'percent': int((float(i) / total_entries) * 100)})
    return {'current': total_entries, 'total': total_entries, 'percent': 100}
def process_file_select(data):
    task_name = data["name"]
    if isinstance(data.get("cache_duration"), int):
        cache_duration = data["cache_duration"]
    else:
        cache_duration = config('CACHE_DURATION', default=86400, cast=int)
    if data.get("autodownload") == True or data.get("autodownload") == False:
        autodownload = data["autodownload"]
    else:
        autodownload = config('AUTODOWNLOAD_FILE', default=True, cast=bool)
    status = "complete"
    try:
        pipeline = image_pipeline(data)
        if pipeline is None:
            raise Exception()
    except:
        current_task.update_state(state="failure", meta='image processing failed')
        status = "failed"
    try:
        tasks = Tasks()
        task_data = tasks.read_file(task_name)
        if not task_data:
            return {"task": task_name, "status": "cancelled"}
        task_data["status"] = status
        tasks.write_file(task_data)
    except:
        return {
            "task": task_name,
            "status": status,
            "autodownload": autodownload
        }
    return {"task": task_name, "status": status, "autodownload": autodownload}
def pdf_annotation_pipeline(bot_names, data):
    # makes it here!
    log.info("STARTING PIPELINE (made it to annotation_pipeline)")  # DEBUG
    current_task.update_state(state='PROGRESS', meta={
        'process_percentage': 78,
        'task': 'starting annotation pipeline'
    })
    for bot_name in bot_names:
        log.info("STARTING {} BOT (annotation_pipeline)".format(bot_name))
        log.debug("Sending doc to {} for annotation...".format(
            bots[bot_name].__class__.__name__))
        current_task.update_state(state='PROGRESS', meta={
            'process_percentage': 79,
            'task': friendly_bots[bot_name]
        })
        data = bots[bot_name].pdf_annotate(data)
        log.debug("{} done!".format(bots[bot_name].__class__.__name__))
        log.info("COMPLETED {} BOT (annotation_pipeline)".format(bot_name))
        # current_task.update_state(state='PROGRESS', meta={'process_percentage': 79, 'task': 'Bot {} complete!'.format(bot_name)})
    log.info("running inference...")
    return data
def svn_update_log(user, password, path, limit, logfile, *args):
    current_task.update_state(state="PROGRESS")
    repo = Svnrepo(path, user, password)
    repo.svn_update("all", *args)
    res = repo.svn_get_reversion(logfile, limit)
    return res
def report_progress(progress, comment):
    current_task.update_state(state='PROGRESS', meta={
        'state': 'RUNNING',
        'progress': int(progress),
        'comment': comment
    })
def transcribe(item):
    url = item.audio_url if item.audio_url else item.video_url
    uuid = koemei.upload_direct(url)
    route = koemei.request_transcription(uuid)
    while True:
        status, progress, data = koemei.transcription_status(route)
        logger.info("{}: {}%".format(status, progress * 100))
        if data is not None:
            break
        current_task.update_state(
            state='PROGRESS',
            meta={
                'progress': progress,
                'eta': None,
                'time': None,
                'duration': None
            })
        time.sleep(5 * 60)
    transcript = reader.read(data)
    raw_files = [dict(content_type='text/xml',
                      file_name='koemei.transcript.xml',
                      body=ET.tostring(data))]
    save_transcription(item,
                       clips=transcript['clips'],
                       speakers=transcript['speakers'],
                       engine=current_task.name,
                       raw_files=raw_files,
                       logger=logger)
def createCephCluster(cluster_data):
    log.debug("Inside createCephCluster Async Task")
    current_task.update_state(state='STARTED')
    log.debug(cluster_data)
    nodelist = []
    failedNodes = []
    noOfNodes = 0
    if 'nodes' in cluster_data:
        nodelist = cluster_data['nodes']
        noOfNodes = len(nodelist)
        del cluster_data['nodes']
    # Return from here if nodelist is empty
    if noOfNodes == 0:
        log.info("Node List is empty, Cluster creation failed")
        raise usm_rest_utils.ClusterCreationFailed(
            nodelist, "Node List is empty, Cluster creation failed")
    # create the cluster
    try:
        usm_rest_utils.create_cluster(cluster_data)
    except Exception, e:
        log.exception(e)
        raise usm_rest_utils.ClusterCreationFailed(
            nodelist, str(e))
def createGlusterBrick(data):
    log.debug("Inside createGlusterBrick Async Task %s" % data)
    current_task.update_state(state='STARTED')
    bricklist = data['bricks']
    # Return from here if bricklist is empty
    if len(bricklist) == 0:
        log.info("Brick List is empty, Brick addition failed")
        raise Exception(
            bricklist, "Brick List is empty, Brick addition failed")
    # Prepare the disks for brick creation
    failed = usm_rest_utils.create_gluster_brick(bricklist)
    log.critical("Brick creation failed for bricks: %s" % str(failed))
    # Remove the failed bricks from bricklist
    bricks = [item for item in bricklist if item not in failed]
    # Return from here if bricks are empty
    if len(bricks) == 0:
        log.info("Brick List is empty, Brick creation failed")
        raise Exception(
            bricklist, "Brick Creation failed")
    # create the Brick
    try:
        volume = GlusterVolume.objects.get(pk=str(data['volume']))
        rc = usm_rest_utils.add_volume_bricks(volume, bricks)
        if rc is False:
            raise Exception(
                bricks, "Brick List is empty, Brick addition failed")
    except Exception, e:
        log.exception(e)
        raise
def shodan_scan_task(id):
    SHODAN_API_KEY = keys['keys']['shodan']
    device = Device.objects.get(id=id)
    api = Shodan(SHODAN_API_KEY)
    try:
        # Search Shodan
        results = api.host(device.ip)
        # Show the results
        total = len(results['ports'])
        for counter, i in enumerate(results['data']):
            product = ''
            tags = ""
            if 'product' in i:
                product = i['product']
            if 'tags' in i:
                tags = i['tags']
            print(counter)
            device1 = ShodanScan(device=device,
                                 products=product,
                                 ports=str(i['port']),
                                 module=i['_shodan']['module'],
                                 tags=tags)
            device1.save()
            current_task.update_state(state='PROGRESS',
                                      meta={'current': counter,
                                            'total': total,
                                            'percent': int((float(counter) / total) * 100)})
        return {'current': total, 'total': total, 'percent': 100}
    except Exception as e:
        print(e.args)
def destroy_server(token, id):
    """Destroys a VRE server in ~okeanos."""
    current_task.update_state(state="Started")
    vre_server = VreServer.objects.get(id=id)
    auth = check_credentials(token)
    current_task.update_state(state="Authenticated")
    set_server_state(token, id, 'Deleting VRE server and its public IP')
    endpoints, user_id = endpoints_and_user_id(auth)
    cyclades = init_cyclades(endpoints['cyclades'], token)
    nc = init_cyclades_netclient(endpoints['network'], token)
    cyclades.delete_server(vre_server.server_id)
    new_status = cyclades.wait_server(vre_server.server_id,
                                      current_status='ACTIVE',
                                      max_wait=MAX_WAIT)
    if new_status != 'DELETED':
        state = 'Error while deleting VRE server'
        set_server_state(token, id, state, status='Destroyed')
        raise ClientError('Error while deleting VRE server', error_fatal)
    ip_to_delete = get_public_ip_id(nc, vre_server.server_IP)
    nc.delete_floatingip(ip_to_delete['id'])
    state = 'VRE server {0} and its public IP {1} were deleted'.format(
        vre_server.server_name, vre_server.server_IP)
    set_server_state(token, id, state, status='Destroyed')
    return vre_server.server_name
def run(self, **kwargs):
    current_task.update_state(state=constants.RUNNING_STATUS)
    file_id = str(kwargs['file_id'])
    submission_id = str(kwargs['submission_id'])
    file_path_irods = str(kwargs['file_path_irods'])
    file_path_client = str(kwargs['file_path_client'])
    index_file_path_client = str(kwargs['index_file_path_client'])
def clean_and_parse(data_frames, position, task_id, total_applicants, applicant_counter):
    # Pre-process the tables to ensure easy processing and string matching.
    applications = []
    applicant_page_numbers = []
    applicant_count = 0
    for index, data_frame in enumerate(data_frames):
        if not data_frame.empty:
            data_frame = data_frame.astype(str)
            data_frame = data_frame.applymap(clean_data)
            data_frame.dropna(axis=1, how='all', inplace=True)
            data_frame.reset_index(drop=True, inplace=True)
            data_frames[index] = data_frame
            table_column_1 = data_frame[data_frame.columns[0]]
            if table_column_1.str.contains("Citoyenneté / Citizenship:").any():
                applicant_count += 1
                applicant_page_numbers.append(index)
    for current_applicant in range(len(applicant_page_numbers)):
        current_task.update_state(task_id=task_id, state='PROGRESS', meta={
            'current': applicant_counter + current_applicant + 1,
            'total': total_applicants})
        if current_applicant == (applicant_count - 1):
            print("Processing Applicant: " + str(current_applicant + 1))
            applications.append(find_essential_details(
                data_frames[applicant_page_numbers[current_applicant]:], position))
        else:
            print("Processing Applicant: " + str(current_applicant + 1))
            applications.append(find_essential_details(
                data_frames[applicant_page_numbers[current_applicant]:applicant_page_numbers[current_applicant + 1]],
                position))
    return applications
def start(self):
    tmpMessages = []
    if not self.trained:
        self.__initPopulation__()
        self.simulations, self.fit = self.__eval__(self.population)
        self.frontLvls = self.__domination__(self.fit)[1]
    # compute
    for i0 in range(self.opt['epochs']):
        self.__iteration__()
        # output times
        tmpKeys = sorted(self.timing.keys())
        tmpToPrint = ''
        for key in tmpKeys:
            if key == 'total':
                tmpToPrint += key + ': %.2e' % (self.timing['total']) + 's; '
            else:
                if self.timing['total'] != 0:
                    tmpToPrint += key + ': %.2f' % (self.timing[key] / self.timing['total'] * 100) + '%; '
                else:
                    tmpToPrint += key + ': %.2f' % (0) + '%; '
        if (np.mod(i0 + 1, self.opt['displayEach']) == 0 and i0 != 0) or i0 == self.opt['epochs'] - 1:
            print('Epoch %5u: ' % (i0) + tmpToPrint[:-2])
            tmpMessages.append('___Epoch %05u: ' % (i0) + tmpToPrint[:-2])
            # QQ plot
            tmpSimulations = self.processBands(self.predict())
            uniform, pValues = self.predictiveQQ(tmpSimulations)
            # metrics
            alpha, xi, piRel = self.metrics(uniform, pValues, tmpSimulations)
            print(' metrics: alpha:%f, xi:%f, pi:%f' % (alpha, xi, piRel))
            tmpMessages.append('______metrics: alpha:%f, xi:%f, pi:%f' % (alpha, xi, piRel))
            # output messages
            tmpKeys = sorted(self.messaging.keys())
            tmpToPrint = ''
            for key in tmpKeys:
                tmpToPrint += key + ': ' + self.messaging[key] + '; '
            if tmpToPrint:
                print(' msg: ' + tmpToPrint[:-2])
                tmpMessages.append('______msg: ' + tmpToPrint[:-2])
            try:
                current_task.update_state(state='PROGRESS',
                                          meta={'message': tmpMessages, 'title': None})
            except Exception:
                pass
    self.trained = True
    # export results
    result = {'parameters': self.population,
              'normalization': self.normalize,
              'fitness': self.fit}
    # plot
    if self.opt['plotResult']:
        self._plot()
    return result
def asigclasdoc_task(ipadd, periodo_pk, jornada_pk, modalidad_pk, institution, token, input_excel):  # ,email):
    log1, log2 = None, None
    excel_name = input_excel.name
    task_id = asigclasdoc_task.request.id
    # email = email
    current_task.update_state(state='PROGRESS', meta={'process_percent': 0})
    filename1 = path + "ERRORES_" + "asignacion_clases_docentes" + "_" + institution + "_" + task_id + ".txt"
    filename2 = path + "CORRECTOS_" + "asignacion_clases_docentes" + "_" + institution + "_" + task_id + ".txt"
    log1 = open(filename1, "w")
    log2 = open(filename2, "w")
    try:
        headers = {'Authorization': "Token " + token}
        book = xlrd.open_workbook(file_contents=input_excel.read(), encoding_override='cp1252')
        sh = book.sheet_by_index(0)
        log1.write("Errores del proceso: Asignacion de clases a docentes; Log para archivo " + excel_name + "\n")
        log2.write("Correctos del proceso: Asignacion de clases a docentes; Log para archivo " + excel_name + "\n")
        if check_headers(sh, 'asigclasdoc'):
            num_rows = sh.nrows - 1
            num_cells = sh.ncols - 1
            curr_row = 0
            while curr_row < num_rows:
                curr_row += 1
                grado = sh.cell_value(rowx=curr_row, colx=0)
                sec = sh.cell_value(rowx=curr_row, colx=1)
                id_doc = sh.cell_value(rowx=curr_row, colx=2)
                clase = sh.cell_value(rowx=curr_row, colx=3)
                grado = grado.replace(" ", "_")
                id_doc = get_correct_id(id_doc)
                # arr = clase.split(" ")
                # clase = clase.encode('ascii','ignore').replace(" ", "").replace("/", "").rstrip()
                clase = remove_special(clase)
                # passed,clase_nomb = bring_classname(arr[1], institution)
                # if passed:
                #     if clase_nomb != 'NO SE ENCONTRO REFERENCIA DE CLASE':
                completeip = (ipadd + "api/asigclasdoc/" + str(int(curr_row + 1)) + "/" +
                              periodo_pk + "/" + jornada_pk + "/" + modalidad_pk + "/" +
                              grado + "/" + sec + "/" + id_doc + "/" + clase)
                r = requests.post(completeip, headers=headers)
                ret = r.text
                if 'ERROR' in ret:
                    log1.write(r.text + "\n")
                else:
                    log2.write(r.text + "\n")
                #     else:
                #         log.write("ERROR: Linea " + str(int(curr_row + 1)) + " " + clase_nomb + " " + clase + "\n")
                # else:
                #     log.write("ERROR: se ha generado un error de sistema; " + clase_nomb)
                #     break
                process_percent = int(100 * float(curr_row) / float(num_rows))
                current_task.update_state(state='PROGRESS', meta={'process_percent': process_percent})
        else:
            log1.write("ERROR: El formato del archivo no es el correcto")
    except Exception, e:
        log1.write("ERROR: Se ha generado un error de sistema;Detalles: " + str(e))
def progress_hook(self, info):
    current_task.update_state(
        state='PROGRESS',
        meta={
            'current': info.get('downloaded_bytes', '0'),
            'total': info.get('total_bytes', '0')
        }
    )
def push_updates(card):
    if card is not None:
        inserted_cards[card.name] = card
    current_task.update_state(
        state='PROGRESS',
        meta=dict(inserted_cards=inserted_cards)
    )
def testtask(start_value=0, end_value=10, step=1):
    current_value = start_value
    while current_value < end_value:
        current_value += step
        current_task.update_state(state='IN_PROGRESS',
                                  meta=dict(current_value=current_value))
        time.sleep(10)
    return 'final result'
def create_yarn_cluster(self):
    """Create Yarn cluster"""
    try:
        current_task.update_state(state="Started")
        self.HOSTNAME_MASTER_IP, self.server_dict = self.create_bare_cluster()
    except Exception, e:
        logging.error(str(e.args[0]))
        raise
def timeseriesFileTask(fileid, projectID):
    uploadedfile = UploadedFile.objects.get(pk=fileid)
    filename = uploadedfile.file.path
    meta = {"type": "Timeseries File",
            "projectID": projectID,
            "filename": filename.split("/")[-1]}
    current_task.update_state(state="STARTED", meta=meta)
    resp = insertTimeseriesFromTable(filename, projectID)
    resp.update(meta)
    return resp
def load_bed(bed_file, db_name, db_host, db_user, db_password):
    current_task.update_state(state='STARTED')
    db = SQLDB(db_name, db_host, db_user, db_password)
    try:
        ngsvtools.bedloader.load(bed_file, db, action=BedLoaderAction)
    except UnsupportedFileError, e:
        return {'state': 'SUCCESS_WITH_ALERT', 'alert': e.msg}
def message_callback(message, message_type, extra=None):
    messages.append(dict(message=message, message_type=message_type, extra=extra))
    current_task.update_state(
        state=states.STARTED,
        meta=dict(
            changeset_id=changeset_pk,
            user_id=applied_by_user_pk,
            server_id=server_pk,
            messages=messages
        ),
    )
def report_progress(progress, comment):
    current_task.update_state(
        state='PROGRESS',
        meta={
            'state': 'RUNNING',
            'progress': int(progress),
            'comment': comment
        })
def status_callback(nmapscan=None):
    """status callback"""
    try:
        current_task.update_state(state="PROGRESS",
                                  meta={"done": nmapscan.progress,
                                        "etc": nmapscan.etc})
    except Exception as e:
        print("status_callback error: " + str(e))
def test_job(wait):
    for i in range(wait):
        time.sleep(1)
        progress = 100 * (float(i) / float(wait))
        current_task.update_state(state='PROGRESS', meta={'progress': progress})
    return datetime.datetime.now()
def write_annotation(annotation_id):
    """
    Annotate variants with frequencies from the database.

    :arg annotation_id: Annotation to write.
    :type annotation_id: int
    """
    logger.info('Started task: write_annotation(%d)', annotation_id)
    current_task.update_state(state='PROGRESS', meta={'percentage': 0})

    annotation = Annotation.query.get(annotation_id)
    if annotation is None:
        raise TaskError('annotation_not_found', 'Annotation not found')
    if annotation.task_done:
        raise TaskError('annotation_written', 'Annotation already written')

    # If running eagerly, task id cannot have been stored yet. But perhaps
    # this is not a good check anyway...
    if (not current_task.request.is_eager and
            annotation.task_uuid and
            annotation.task_uuid != current_task.request.id):
        raise TaskError('annotation_writing', 'Annotation is being written '
                        'by another task instance')

    original_data_source = annotation.original_data_source
    annotated_data_source = annotation.annotated_data_source

    # Calculate data digest if it is not yet known.
    if not original_data_source.checksum:
        with original_data_source.data() as data:
            (original_data_source.checksum,
             original_data_source.records) = digest(data)
        db.session.commit()

    try:
        original_data = original_data_source.data()
        annotated_data = annotated_data_source.data_writer()
    except DataUnavailable as e:
        raise TaskError(e.code, e.message)

    try:
        with original_data as original, \
                annotated_data as annotated_variants:
            annotate_data_source(original, annotated_variants,
                                 original_filetype=original_data_source.filetype,
                                 annotated_filetype=annotated_data_source.filetype,
                                 queries=annotation.queries,
                                 original_records=original_data_source.records)
    except ReadError as e:
        annotated_data_source.empty()
        raise TaskError('invalid_data_source', str(e))

    current_task.update_state(state='PROGRESS', meta={'percentage': 100})
    annotation.task_done = True
    db.session.commit()

    logger.info('Finished task: write_annotation(%d)', annotation_id)
def process_update(progress, percent, name, is_celery=False):
    if is_celery is True:
        current_task.update_state(
            state=progress,
            meta={
                c.CELERY_PRO_PERCENT: percent,
                c.PRO_NAME: name
            }
        )
def update_task_status(progress, total):
    """
    Updates the current task with the progress.
    """
    info = {
        'current': progress,
        'total': total
    }
    current_task.update_state(state='PROGRESS', meta=info)