def v2_runner_on_async_failed(self, result):
    host = result._host.get_name()
    task = result._task.get_name()
    jid = result._result.get('ansible_job_id')
    # self.runner_on_async_ok(host, result._result, jid)
    # print(json.dumps({"{}|{}|{}".format(host.name, task.name, jid): result._result}))
    print(json.dumps({"{}|{}|{}".format(host, task, jid): result._result}))

def createMachine(self, args, ssh_keys):
    start_time = time.time()  # for time measurement

    # Initiate droplet creation
    droplet = self.api.createDroplet(args.region, args.droplet_name,
                                     args.droplet_size, args.image, ssh_keys)
    droplet_id = droplet["droplet"]["id"]
    print('Droplet creation initiated, id={id}'.format(id=droplet_id))

    # Wait 20 seconds to allow DigitalOcean to create the droplet
    time.sleep(20)

    retries = 0
    state = dict()
    status = None
    # Periodically check droplet status
    while retries < settings.MAX_RETRIES:
        state = self.api.status(droplet_id)
        status = state["droplet"]["status"]
        if status == "new":
            print("Droplet not yet active, retrying in 5s")
        elif status == "active":
            # Machine is up and running!
            duration = time.time() - start_time
            print("Droplet is up and running! Duration: {time:.0f}s".format(time=duration))
            break
        else:
            print("Droplet has weird status, retrying in 5s:", status,
                  json.dumps(state, indent=4))
        time.sleep(5)
        retries += 1

    if status != "active":
        # Failed to create droplet (never polled, or never became active)
        raise ValueError("failed to create droplet :(")

    # Dump droplet state, which can be passed into --debug-state
    print(json.dumps(state))
    return state

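# For context, a minimal sketch of how createMachine might be invoked,
# assuming an argparse-style namespace. Only the attribute names read above
# (region, droplet_name, droplet_size, image) come from the function itself;
# the Provisioner class and all values are hypothetical.
#
#   import argparse
#   args = argparse.Namespace(region="fra1", droplet_name="test-node",
#                             droplet_size="s-1vcpu-1gb",
#                             image="ubuntu-20-04-x64")
#   state = Provisioner().createMachine(args, ssh_keys=[1234567])
#   print(state["droplet"]["status"])  # "active" once createMachine returns
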
def checkNum(request):
    response_data = {}
    team = Team.objects.get(id_code=request.POST['id_code'])
    # The original called an identically named checkNum(team, number) helper,
    # which this view definition shadows (and would recurse into itself);
    # number_exists is a hypothetical stand-in for that helper.
    if number_exists(team, request.POST['number']):
        response_data['success'] = 0
        response_data['message'] = '号码已存在'  # "the number already exists"
    else:
        response_data['success'] = 1
    return HttpResponse(json.dumps(response_data), content_type='application/json')

def v2_runner_on_async_poll(self, result):
    host = result._host.get_name()
    task = result._task.get_name()
    jid = result._result.get('ansible_job_id')
    # FIXME, get real clock
    clock = 0
    # self.runner_on_async_poll(host, result._result, jid, clock)
    # print(json.dumps({"{}|{}|{}".format(host.name, task.name, jid): result._result}))
    print(json.dumps({"{}|{}|{}".format(host, task, jid): result._result}))

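# A self-contained illustration of the record the two async callbacks above
# print: a one-key JSON object keyed "host|task|jid". All values here are
# hypothetical.
def _demo_async_record():
    import json
    host, task, jid = "web01", "long_task", "123.456"
    print(json.dumps({"{}|{}|{}".format(host, task, jid): {"started": 1}}))
    # -> {"web01|long_task|123.456": {"started": 1}}
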
def action():
    query = bottle.request.query.q
    article = collection.find_article_by_title(query)
    bottle.response.content_type = 'application/json'
    if article is None:
        return json.dumps({'textToSpeech': '404 Not Found'}, indent=2,
                          separators=(',', ': '), ensure_ascii=False)
    return json.dumps({'textToSpeech': article.opening_text}, indent=2,
                      separators=(',', ': '), ensure_ascii=False)

def action():
    query = bottle.request.query.q
    terms = analyse.extractWords(query, wp.FilterWords.excludeParticles)
    titles = index.search(terms)
    bottle.response.content_type = 'application/json'
    if titles is None:
        # "Too bad, couldn't find it"
        return json.dumps({'textToSpeech': 'はい残念みつからないよー'}, indent=2,
                          separators=(',', ': '), ensure_ascii=False)
    return json.dumps({'textToSpeech': 'か'.join(titles)}, indent=2,
                      separators=(',', ': '), ensure_ascii=False)

def delete(self, id):
    try:
        review = models.Review.select().where(
            models.Review.created_by == g.user,
            models.Review.id == id
        ).get()
    except models.Review.DoesNotExist:
        return make_response(json.dumps(
            {'error': 'That review does not exist or is not editable.'}
        ), 403)
    query = review.delete()
    query.execute()
    return '', 204, {'Location': url_for('resources.reviews.reviews')}

def action():
    query = bottle.request.query.q
    terms = analyse.extractWords(query)
    table1 = index.sortSearchReturnTable(terms)
    table2 = indexOpeningText.sortSearchReturnTable(terms)
    title = index.returnBestFromTable(index.mergeTable(table1, table2))
    bottle.response.content_type = 'application/json'
    if title is None:
        return json.dumps({'textToSpeech': 'はい残念みつからないよー'}, indent=2,
                          separators=(',', ': '), ensure_ascii=False)
    return json.dumps({'textToSpeech': title}, indent=2,
                      separators=(',', ': '), ensure_ascii=False)

def action():
    query = bottle.request.query.q
    terms = analyse.extractWords(query)
    ngrams = analyse.divide_ngrams(query)
    print('Debug: get document title containing ngrams', index.ngrams_search(ngrams))
    title = index.sortSearch(terms)
    bottle.response.content_type = 'application/json'
    if title is None:
        return json.dumps({'textToSpeech': 'はい残念みつからないよー'}, indent=2,
                          separators=(',', ': '), ensure_ascii=False)
    return json.dumps({'textToSpeech': title}, indent=2,
                      separators=(',', ': '), ensure_ascii=False)

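# The action() handlers above all share one bottle pattern: read the q query
# parameter, look something up, and answer JSON under a textToSpeech key.
# A minimal self-contained sketch of that pattern; the route path, handler
# name, and stub lookup are assumptions, only the response shape is taken
# from the handlers above.
import json
import bottle

def _lookup(query):
    # Stand-in for the collection/index lookups used above; always misses.
    return None

@bottle.route('/action-sketch')
def action_sketch():
    query = bottle.request.query.q
    bottle.response.content_type = 'application/json'
    result = _lookup(query)
    if result is None:
        return json.dumps({'textToSpeech': 'not found'}, indent=2,
                          separators=(',', ': '), ensure_ascii=False)
    return json.dumps({'textToSpeech': result}, indent=2,
                      separators=(',', ': '), ensure_ascii=False)
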
def put(self, id):
    args = self.reqparse.parse_args()
    try:
        review = models.Review.select().where(
            models.Review.created_by == g.user,
            models.Review.id == id
        ).get()
    except models.Review.DoesNotExist:
        return make_response(json.dumps(
            {'error': 'That review does not exist or is not editable.'}
        ), 403)
    query = review.update(**args)
    query.execute()
    review = add_course(review_or_404(id))
    return (review, 200,
            {'Location': url_for('resources.reviews.review', id=review.id)})

def send(self):
    self.listener.sendMessage(json.dumps(["bet", "processing"]))
    itemsToReceive = []
    for assetID in self.items:
        itemsToReceive.append({
            "appid": 440,
            "contextid": 2,
            "amount": 1,
            "assetid": str(assetID).encode("utf-8")
        })
    self.offerID = self.Bot.Trade().sendOffer(
        self.Partner, [], itemsToReceive,
        "Thanks for betting with Saloon.tf!")
    if self.offerID:
        self.listener.sendMessage(json.dumps(["tradeOffer", self.offerID]))
        self.monitor()
    else:
        self.listener.sendMessage(json.dumps(["tradeOffer", False]))

def get(self, request):
    """
    Implement the goods list page with a plain Django view.
    :param request:
    :return:
    """
    json_list = []
    goods = Goods.objects.all()[:10]
    for good in goods:
        json_dict = {}
        json_dict["name"] = good.name
        json_dict["category"] = good.category.name
        json_dict["market_price"] = good.market_price
        # Note: DateTimeField/DecimalField values are not JSON-serializable
        # as-is (see the sketch below)
        json_dict["add_time"] = good.add_time
        json_list.append(json_dict)
    return HttpResponse(json.dumps(json_list), content_type="application/json")

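# The hand-rolled dict above breaks as soon as a field value is not a JSON
# primitive (e.g. a DateTimeField or DecimalField). Django's JsonResponse
# defaults to DjangoJSONEncoder, which handles both; a sketch of the same
# listing on top of it (same Goods fields as above, the view name is an
# assumption):
from django.http import JsonResponse

def goods_list_sketch(request):
    goods = Goods.objects.all()[:10]
    data = [{"name": g.name,
             "category": g.category.name,
             "market_price": g.market_price,  # Decimal handled by the encoder
             "add_time": g.add_time}          # datetime handled by the encoder
            for g in goods]
    # safe=False permits a top-level list payload
    return JsonResponse(data, safe=False)
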
def test_get_active_task():
    """Test returning the active task."""
    task_data = json.dumps({
        "name": "1",
        "desc": "2",
        "value": 3,
        "id": 4,
        "category": "5",
        "level": 6,
    })
    save_to_test_file("""{"ACTIVE_TASK":%s}""" % task_data)
    tmg = TaskManager(TEST_JSON_FILE)
    task_1 = Task(json.loads(task_data))
    task_2 = tmg.get_current_task()
    assert task_1.id == task_2.id
    assert task_1.name == task_2.name
    assert task_1.desc == task_2.desc
    assert task_1.value == task_2.value
    assert task_1.category == task_2.category
    assert task_1.level == task_2.level

def get_user_deals(request, uid):
    resp = {}
    if request.method != 'GET':
        resp['status'] = 1
        resp['message'] = 'Wrong http method!'
        return HttpResponse(json.dumps(resp), content_type='application/json')
    user = User.objects.filter(id=uid)
    if not user.exists():
        resp['status'] = 2
        resp['message'] = 'No such user'
        return HttpResponse(json.dumps(resp), content_type='application/json')
    elif len(user) > 1:
        resp['status'] = 3
        resp['message'] = 'Too many users found, impossible!'
        return HttpResponse(json.dumps(resp), content_type='application/json')
    helper_deals = Deal.objects.filter(helper=user)
    needer_deals = Deal.objects.filter(needer=user)
    helper_deals_info = []
    needer_deals_info = []
    for helper_deal in helper_deals:
        tmpinfo = helper_deal.to_dict()
        tmpinfo['helper'] = helper_deal.helper.to_dict()
        tmpinfo['needer'] = helper_deal.needer.to_dict()
        helper_deals_info.append(tmpinfo)
    for needer_deal in needer_deals:
        tmpinfo = needer_deal.to_dict()
        tmpinfo['helper'] = needer_deal.helper.to_dict()
        tmpinfo['needer'] = needer_deal.needer.to_dict()
        needer_deals_info.append(tmpinfo)
    resp['status'] = 0
    resp['message'] = 'Success!'
    resp['data'] = {}
    resp['data']['needer_deal'] = needer_deals_info
    resp['data']['helper_deal'] = helper_deals_info
    return HttpResponse(json.dumps(resp), content_type='application/json')

def modifier(task):
    if task.label != label:
        return task

    if input.get('testPath', ''):
        is_wpttest = 'web-platform' in task.task['metadata']['name']
        is_android = 'android' in task.task['metadata']['name']
        gpu_required = False
        if (not is_wpttest) and \
           ('gpu' in task.task['metadata']['name'] or
            'webgl' in task.task['metadata']['name'] or
            ('reftest' in task.task['metadata']['name'] and
             'jsreftest' not in task.task['metadata']['name'])):
            gpu_required = True

        # Create new cmd that runs a test-verify type job
        preamble_length = 3
        verify_args = ['--e10s', '--verify', '--total-chunk=1', '--this-chunk=1']
        if is_android:
            # no --e10s; todo, what about future geckoView?
            verify_args.remove('--e10s')
        if gpu_required:
            verify_args.append('--gpu-required')
        if 'testPath' in input:
            task.task['payload']['env']['MOZHARNESS_TEST_PATHS'] = json.dumps({
                task.task['extra']['suite']['flavor']: input['testPath']
            })
        cmd_parts = task.task['payload']['command']
        keep_args = ['--installer-url', '--download-symbols', '--test-packages-url']
        cmd_parts = remove_args_from_command(cmd_parts, preamble_length, keep_args)
        cmd_parts = add_args_to_command(cmd_parts, verify_args)
        task.task['payload']['command'] = cmd_parts

        # morph the task label to a test-verify job
        pc = task.task['metadata']['name'].split('/')
        config = pc[-1].split('-')
        subtype = ''
        symbol = 'TV-bf'
        if gpu_required:
            subtype = '-gpu'
            symbol = 'TVg-bf'
        if is_wpttest:
            subtype = '-wpt'
            symbol = 'TVw-bf'
        if not is_android:
            subtype = "%s-e10s" % subtype
        newlabel = "%s/%s-test-verify%s" % (pc[0], config[0], subtype)
        task.task['metadata']['name'] = newlabel
        task.task['tags']['label'] = newlabel
        task.task['extra']['index']['rank'] = 0
        task.task['extra']['chunks']['current'] = 1
        task.task['extra']['chunks']['total'] = 1
        task.task['extra']['suite']['name'] = 'test-verify'
        task.task['extra']['suite']['flavor'] = 'test-verify'
        task.task['extra']['treeherder']['symbol'] = symbol
        del task.task['extra']['treeherder']['groupSymbol']
    return task

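# Shape of the MOZHARNESS_TEST_PATHS value set above: a JSON object mapping
# the suite flavor to the requested test path, e.g. (both values hypothetical)
#   {"mochitest": "dom/base/test/test_example.html"}
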
def process_item(self, item, spider):
    text = json.dumps(dict(item), ensure_ascii=False) + "\n"
    # The original snippet ends here; a JSON-lines pipeline would typically
    # write `text` to a handle opened elsewhere (self.file is an assumption)
    # and must return the item to keep the pipeline chain going.
    self.file.write(text)
    return item

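# A fuller shape for this kind of JSON-lines pipeline pairs process_item
# with open_spider/close_spider for the file handling; the class name and
# output file name below are assumptions.
import json

class JsonLinesPipelineSketch:
    def open_spider(self, spider):
        self.file = open('items.jl', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
        return item
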
def __call__(self, heap):
    """
    Handle heaps. Merge polarization heaps with matching timestamps
    and pass them to the output queue.
    """
    _log.debug("Unpacking heap")
    items = self.ig.update(heap)
    if 'data' not in items:
        # On first heap only, get number of channels
        fft_length = convert48_64(items['fft_length'].value)
        nchannels = int((fft_length / 2) + 1)
        _log.debug("First item - setting data length to {} channels".format(nchannels))
        self.ig.add_item(5640, "data", "", (nchannels, ), dtype="<f")
        # Reprocess heap to get data
        items = self.ig.update(heap)

    _log.debug("Checking missing keys.")
    if len(items.keys()) != len(self.ig.items()):
        missing_keys = []
        for key in self.ig.items():
            if key[0] not in items:
                missing_keys.append(key[0])
        _log.warning(
            "Received invalid heap, containing only {} / {} keys. Missing keys:\n {}"
            .format(len(items.keys()), len(self.ig.items()), " \n".join(missing_keys)))
        return
    _log.debug("No missing keys.")

    pol = convert48_64(items["polarization"].value)
    nds = convert48_64(items["noise_diode_status"].value)
    section_id = self.__group_prefix + "{}_ND_{}".format(pol, nds)
    _log.debug("Set section_id: {}".format(section_id))

    sampling_rate = float(convert48_64(items["sampling_rate"].value))
    fft_length = convert48_64(items["fft_length"].value)
    number_of_input_samples = convert48_64(items["number_of_input_samples"].value)
    number_of_saturated_samples = convert48_64(items["number_of_saturated_samples"].value)
    naccumulate = convert48_64(items["naccumulate"].value)
    sync_time = convert48_64(items["sync_time"].value)
    timestamp_count = convert48_64(items["timestamp_count"].value)

    # The integration period does not account for sampling efficiency,
    # as heaps may be lost or may not fall into this gate
    integration_period = (naccumulate * fft_length) / sampling_rate

    # The reference time is in the center of the integration period
    reference_time = float(sync_time) + float(timestamp_count) / sampling_rate \
        + float(integration_period / 2.)
    _log.debug("Set timestamp: {}".format(reference_time))

    data = {}
    data['timestamp'] = np.array([reference_time])
    data['integration_time'] = np.array([number_of_input_samples / sampling_rate])
    data['saturated_samples'] = np.array([number_of_saturated_samples])
    # np.ndarray values are not JSON-serializable, so convert for the debug dump
    _log.debug("Meta data:\n{}".format(
        json.dumps({k: v.tolist() for k, v in data.items()}, indent=4)))
    data['spectrum'] = items['data'].value
    return section_id, data, self.__attributes

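# To make the reference_time arithmetic above concrete, a toy calculation
# with round numbers (all values hypothetical):
def _demo_reference_time():
    sampling_rate = 4.0e9     # 4 GHz sampler
    fft_length = 4096
    naccumulate = 1024
    sync_time = 1600000000    # epoch seconds at synchronisation
    timestamp_count = 8.0e9   # samples since sync -> 2 s into the scan

    integration_period = naccumulate * fft_length / sampling_rate  # ~1.05 ms
    reference_time = (sync_time + timestamp_count / sampling_rate
                      + integration_period / 2.)  # heap start + half the window
    return reference_time  # -> 1600000002.000524...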