def _solve(g, name, params):
    from grb_lazy import solve_problem as solve
    print()
    print('*** STARTING A NEW PROBLEM ***')
    print()
    print('name:', name)
    print('params:', params)
    #
    nontriv_sccs = sum(1 for sc in strongly_connected_components(g) if len(sc) > 1)
    assert nontriv_sccs == 1, nontriv_sccs
    assert g.number_of_selfloops() == 0
    #
    stats = Stats(name=name, params=params, is_optimal=True, cost=None,
                  ILP=0, node=0, iter=0, time=None)
    start = time()
    elims, cost, cycle_matrix = solve(g, stats)
    end = time()
    #
    stats.time = end - start
    #
    stats.cost = cost
    print_stats(g, stats)
    fname = (name + '_' + params).replace(' ', '_')
    serialize(cycle_matrix, TMP_DIR + 'cycle_matrix_' + fname + '.pkl.gz')
    serialize(elims, TMP_DIR + 'solution_' + fname + '.pkl.gz')
    #
    return stats
def get_numeric_jacobian(problem_name):
    # Always return the Jacobian as list of (i, j, J_ij)
    jacfile = 'data/' + problem_name + '.jac.pkl.gz'
    if isfile(jacfile):
        print('Using cached Jacobian for', problem_name)
        return deserialize(jacfile)
    #
    print('Computing then saving the Jacobian for', problem_name)
    g, problem = create_dag(problem_name)
    code = numeric_diff(g, problem)
    globals_ = {}
    try:
        exec_(code, globals_)
    except:
        print('===============================================================')
        print(code)
        print('===============================================================')
        raise
    J = globals_['J']
    #
    Jrows = get_J_rowwise(problem)
    jac = [(i, j, J[i, j]) for i, cols in enumerate(Jrows) for j in cols]
    serialize(jac, 'data/' + problem.name + '.jac.pkl.gz')
    return jac
def save_data(self):
    # if not os.path.exists(os.path.join(self.root_directory, "ent2ids")):
    #     serialize(self.entity_dict, os.path.join(self.root_directory, "ent2ids"), in_json=True)
    # if not os.path.exists(os.path.join(self.root_directory, "relation2ids")):
    #     serialize(self.relation_dict, os.path.join(self.root_directory, "relation2ids"), in_json=True)
    # if not os.path.exists(os.path.join(self.root_directory, "e1rel_e2.json")):
    #     e1rel_e2 = defaultdict(list)
    #     for head, relation, tail in itertools.chain(self.facts_data, *self.test_tasks.values(),
    #                                                 *self.valid_tasks.values()):
    #         if isinstance(relation, int):
    #             relation = self.id2relation[relation]
    #         e1rel_e2[self.id2entity[head] + relation].append(self.id2entity[tail])
    #     serialize(e1rel_e2, os.path.join(self.root_directory, "e1rel_e2.json"), in_json=True)
    if not os.path.exists(os.path.join(self.data_directory, "rel2candidates.json")):
        rel2candidates = {
            key: list(map(self.id2entity.__getitem__, value))
            for key, value in self.rel2candidate.items()
        }
        train_tasks = set(map(
            lambda x: x[1],
            load_facts(os.path.join(self.data_directory, "train.txt"))))
        for task in train_tasks:
            rel2candidates[task] = self.id2entity
        serialize(rel2candidates,
                  os.path.join(self.data_directory, "rel2candidates.json"),
                  in_json=True)
    serialize(self.rel2candidate, os.path.join(self.data_directory, "rel2candidates"))
    save_index(self.id2entity, os.path.join(self.data_directory, "ent2id.txt"))
    save_index(self.id2relation, os.path.join(self.data_directory, "relation2id.txt"))
def get_properties_dict(serialized_file: str, sparql_file: str, repository: str,
                        endpoint: str, endpoint_type: str,
                        limit: int = 1000) -> ResourceDictionary:
    """
    Return a ResourceDictionary with the list of properties in the ontology

    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: A ResourceDictionary with the list of properties in the ontology
    """
    global_properties_dict = deserialize(serialized_file)
    if global_properties_dict:
        return global_properties_dict
    global_properties_dict = ResourceDictionary()
    global_properties_dict.add(RDF.type)
    properties_sparql_query = open(sparql_file).read()
    properties_sparql_query_template = Template(
        properties_sparql_query + " limit $limit offset $offset ")
    for rdf_property in get_sparql_results(properties_sparql_query_template,
                                           ["property"], endpoint, repository,
                                           endpoint_type, limit):
        global_properties_dict.add(rdf_property[0])
    serialize(global_properties_dict, serialized_file)
    return global_properties_dict
def get_classes_dict(serialized_file: str, sparql_file: str, repository: str,
                     endpoint: str, endpoint_type: str,
                     limit: int = 1000) -> ResourceDictionary:
    """
    Return a ResourceDictionary with the list of classes in the ontology

    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param limit: The sparql query limit
    :return: A ResourceDictionary with the list of classes in the ontology
    """
    classes_dictionary = deserialize(serialized_file)
    if classes_dictionary:
        return classes_dictionary
    classes_dictionary = ResourceDictionary()
    classes_sparql_query = open(sparql_file).read()
    classes_sparql_query_template = Template(
        classes_sparql_query + " limit $limit offset $offset ")
    for class_uri in get_sparql_results(classes_sparql_query_template, ["class"],
                                        endpoint, repository, endpoint_type, limit):
        classes_dictionary.add(class_uri[0])
    serialize(classes_dictionary, serialized_file)
    return classes_dictionary
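# Hypothetical usage sketch for the two paginated-lookup helpers above; the
# file paths, repository name, and endpoint URL below are made-up values for
# illustration, not ones taken from the project.
# props = get_properties_dict('data/properties.pkl', 'sparql/properties.rq',
#                             'my-repo', 'http://localhost:7200', 'GRAPHDB')
# classes = get_classes_dict('data/classes.pkl', 'sparql/classes.rq',
#                            'my-repo', 'http://localhost:7200', 'GRAPHDB')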
def __call__(self, config):
    start_time = time.time()
    np.random.seed(config.seed)
    tf.set_random_seed(config.seed)
    print('Starting id = {}, n_parallel = {}, n_hidden = {}'.format(
        config.index, config.n_parallel, config.n_hidden))
    x = np.array([-1.0, -2.0, -3.0, 1.0, 2.0, 3.0])
    y = np.array([-1.0, 2.0, -1.0, 1.0, -2.0, 1.0]) + float(self.offset_str)
    results = [run_single(config.n_hidden, config.n_samples, x, y,
                          lr=1e-2 / config.n_hidden)
               for i in range(config.n_parallel)]
    try:
        summary = MCResult(num_crossed=np.count_nonzero(results),
                           num_total=len(results))
        print('id = {}, n_parallel = {}, n_hidden = {}: {}/{}'.format(
            config.index, config.n_parallel, config.n_hidden,
            summary.num_crossed, summary.num_total))
        target_folder = self.base_dir + 'mc-data-{}/id-{}_hidden-{}_parallel-{}_time-{}/'.format(
            self.offset_str, config.index, config.n_hidden, config.n_parallel,
            int(start_time * 1000))
        utils.serialize(target_folder + 'result.p', summary)
        utils.serialize(target_folder + 'config.p', config)
    except Exception as e:
        print(e)
def run_training(n_hidden=256, ds_type='2d_star_11', n_parallel=1000,
                 n_epochs=1000000, random_bias=False, act='relu', n_layers=1,
                 device_number=0, version=0):
    print('Start time:', datetime.datetime.now())
    if n_layers > 1:
        name = f'{n_hidden}x{n_layers}-{n_parallel}-{random_bias}-{act}-v{version}'
    else:
        name = f'{n_hidden}-{n_parallel}-{random_bias}-{act}-v{version}'
    x_train, y_train = get_2d_star_dataset(k=11, dist=0.1)
    print(f'Running model for {n_epochs} epochs on dataset {ds_type}: {name}')
    base_dir = Path(get_results_path())
    file_dir = base_dir / ds_type / name
    file_path = file_dir / 'model_trainer.p'
    if utils.existsFile(file_path):
        print('Loading existing model')
        mt = utils.deserialize(file_path)
        mt.to(get_device(device_number))
    else:
        print('Creating new model')
        mt = ModelTrainer(x_train, y_train, n_parallel=n_parallel,
                          hidden_sizes=[n_hidden] * n_layers,
                          n_virtual_samples=n_hidden**2,
                          random_bias=random_bias, act=act,
                          device_number=device_number, version=version)
    mt.train(n_epochs)
    mt.to('cpu')
    utils.serialize(file_path, mt)
    utils.serialize(file_dir / 'config.p',
                    dict(ds_type=ds_type, n_parallel=n_parallel,
                         n_layers=n_layers, random_bias=random_bias, act=act,
                         n_epochs=n_epochs, version=version))
    print('Saved trained model')
    print('End time:', datetime.datetime.now())
def search_evaluate_graph(self, wiki=True):
    self.kg = KG(self.train_facts, entity_num=len(self.entity_dict),
                 relation_num=len(self.relation_dict))
    rel2candidates = {key: set(value) for key, value in self.rel2candidate.items()}
    if wiki:
        evaluate_graphs = {}
        for head, relation, tail in tqdm(
                itertools.chain(self.valid_facts, self.test_facts)):
            candidates = rel2candidates[relation] | {tail}
            evaluate_graphs[(head, relation, tail)] = self.kg.graphs_among(
                head, candidates, cutoff=3)
    else:
        evaluate_pairs = {}
        for head, relation, tail in itertools.chain(self.valid_facts,
                                                    self.test_facts):
            evaluate_pairs.setdefault((head, relation), set())
            evaluate_pairs[(head, relation)].add(tail)
        evaluate_graphs = {}
        for (head, relation), tail_set in tqdm(evaluate_pairs.items()):
            candidates = rel2candidates[relation] | tail_set
            evaluate_graphs[(head, relation)] = self.kg.graphs_among(
                head, candidates, cutoff=3)
    serialize(evaluate_graphs, os.path.join(self.data_directory, "evaluate_graphs"))
def run_finer_lrs(init_param='kaiming', device='cpu'):
    dist_grid = [ExampleDistribution()] \
        + [RadialDataDistribution(d=2**k) for k in range(7)]
    std_grid = [0.1, 0.5, 1.0, 2.0]
    # bi_grid = [('zero', 0.0), ('he+5', 0.0), ('he+1', 0.0), ('kink_uniform', 0.0)] \
    #     + [(bim, big) for big in std_grid for bim in ['normal', 'uniform']] \
    #     + [('pos-unif', 1.0), ('neg-unif', 1.0), ('kink-unif', 1.0), ('kink-neg-unif', 1.0),
    #        ('kink-neg-point', 0.0)]
    bi_grid = [('zero', 0.0), ('unif', 1.0), ('unif-pos', 1.0), ('unif-neg', 1.0),
               ('kink-neg-unif', 1.0), ('pytorch', 1.0), ('kink-neg-point', 0.0)]
    for opt in ['gd', 'gd-mom', 'adam']:
        for dist in dist_grid:
            d = dist.get_x_dim()
            for bim, big in bi_grid:
                folder_name = f'{init_param}_{opt}_{dist.get_name()}_{bim}-{big:g}'
                path = Path(custom_paths.get_results_path()) / 'nn_comparison' / folder_name
                best_lr_file = Path(custom_paths.get_results_path()) \
                    / 'nn_comparison' / f'{folder_name}_bestlr.pkl'
                if not utils.existsFile(best_lr_file):
                    # f-prefix was missing, so the file name never got interpolated
                    sys.stderr.write(f'best lr file {best_lr_file} does not exist!\n')
                    continue
                best_lr = utils.deserialize(best_lr_file)
                lr_grid = [best_lr * (2**(k / 8)) for k in range(-3, 4)]
                for lr in lr_grid:
                    print(f'Running combination {folder_name} with lr {lr:g}')
                    file = path / f'{lr:g}.pkl'
                    utils.ensureDir(file)
                    if utils.existsFile(file):
                        continue
                    n_rep = 2 if d == 64 else 1
                    trainer = SimpleParallelTrainer(n_parallel=100 // n_rep,
                                                    n_train=256 * d, n_valid=1024,
                                                    n_test=1024, data_distribution=dist,
                                                    lr=lr, bias_init_gain=big,
                                                    batch_size=256, bias_init_mode=bim,
                                                    init_param=init_param,
                                                    n_epochs=8192 // d, seed=0,
                                                    device=device, n_hidden=512,
                                                    opt=opt,
                                                    valid_epoch_interval=64 // d,
                                                    n_rep=n_rep)
                    results = trainer.fit(do_plot=False, verbose=False)
                    if results is None:
                        print('Got NaN values')
                    utils.serialize(file, {'trainer': trainer, 'results': results})
def put(self, fr_id=None):
    if not fr_id:
        return abort(400)
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int,
                        help='Unique ID of Feature Request record')
    parser.add_argument('title', type=str,
                        help='Title for this feature, must be unique')
    parser.add_argument('description', type=str,
                        help='Description for this feature')
    parser.add_argument('client', type=str,
                        help="Client to appoint for this feature, choices defined in models.py")
    parser.add_argument('client_priority', type=int,
                        help="Priority number for this feature (1 being the lowest)")
    parser.add_argument('target_date',
                        type=lambda x: datetime.strptime(x, '%Y-%m-%dT%H:%M:%S'),
                        help="Target date for completion in the format of Y-m-dTH:M:S; i.e.: 2016-07-12T23:13:3")
    parser.add_argument('product_area', type=str,
                        help="Area for target feature, choices defined in models.py")
    args = parser.parse_args(strict=True)
    if not self._valid_choice_field(args['client'], FeatureRequest.CLIENTS):
        return serialize(args), 400
    if not self._valid_choice_field(args['product_area'], FeatureRequest.PRODUCT_AREAS):
        return serialize(args), 400
    models = db.session.query(FeatureRequest).filter(FeatureRequest.id == args['id'])
    if models.count():
        try:
            models.update(dict(title=args['title'],
                               description=args['description'],
                               client=args['client'],
                               client_priority=args['client_priority'],
                               target_date=args['target_date'],
                               product_area=args['product_area']))
            db.session.commit()
        except exc.IntegrityError as e:
            return self._safe_exit_and_error(e)
    else:
        return self._safe_exit_and_error("Invalid id: {pk}.".format(pk=fr_id))
    return serialize(db.session.query(FeatureRequest).filter_by(id=fr_id).first())
def test_serialization(self):
    serialize(VECTOR, VECTOR_TEST_SERIALIZE_FILE)
    n_vector = deserialize(VECTOR_TEST_SERIALIZE_FILE)
    VECTOR.all_to_affine()
    self.assertEqual(n_vector, VECTOR)
    self.assertTrue(VECTOR.all_validate())
    self.assertTrue(n_vector.all_validate())
def test_serialization(self):
    serialize(MATRIX, MATRIX_TEST_SERIALIZE_FILE)
    n_matrix = deserialize(MATRIX_TEST_SERIALIZE_FILE)
    MATRIX.all_to_affine()
    self.assertEqual(n_matrix, MATRIX)
    self.assertTrue(MATRIX.all_validate())
    self.assertTrue(n_matrix.all_validate())
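# A minimal sketch of the round-trip contract the two tests above exercise.
# The actual serialize/deserialize helpers are project-specific; this assumes
# (hypothetically) a gzip+pickle implementation with the same call shape.
import gzip
import pickle

def _serialize_sketch(obj, path):
    # write a pickled, gzip-compressed copy of obj to path
    with gzip.open(path, 'wb') as f:
        pickle.dump(obj, f)

def _deserialize_sketch(path):
    # load the object back; the tests assert it compares equal to the original
    with gzip.open(path, 'rb') as f:
        return pickle.load(f)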
def mc_runner(config, offset_str, base_dir, use_sgd, use_early_stopping,
              use_sufficient_stopping, use_small_lr, initialize_custom):
    # method that can be run for a MCConfiguration object to do the
    # computations and save them to a folder
    start_time = time.time()
    np.random.seed(config.seed)
    x = np.array([-1.0, -2.0, -3.0, 1.0, 2.0, 3.0])
    y = np.array([-1.0, 2.0, -1.0, 1.0, -2.0, 1.0]) + float(offset_str)
    x_weights = np.random.multinomial(
        config.n_samples, [1. / 6.] * 6, size=config.n_parallel) / config.n_samples
    val_x_weights = np.random.multinomial(
        config.n_samples, [1. / 6.] * 6, size=config.n_parallel) / config.n_samples
    # x_weights = 1./6. * np.ones(shape=(n_parallel, 6))
    (a, b, c, w) = CheckingNN.createRandomWeights(config.n_parallel, config.n_hidden,
                                                  swap_variances=initialize_custom)
    train_setups = [TrainingSetup(x, x_weights[i, :], y)
                    for i in range(config.n_parallel)]
    if use_small_lr:
        lrs = np.array([1e-2 / config.n_hidden] * config.n_parallel)
    else:
        lrs = np.array([train_setups[i].compute_lr(a[i, 0, :], b[i, 0, :], w[i, 0, :])
                        for i in range(config.n_parallel)])
    net = CheckingNN((a, b, c, w), train_setups, lrs, val_x_weights=val_x_weights)
    max_num_checks = 10000 if initialize_custom else 1000
    num_minibatches_per_check = 100 if initialize_custom else 1000
    minibatch_size = 16 if use_sgd else None  # non-stochastic gradient descent
    stopping_criteria = [CrossingStoppingCriterion()]
    if use_early_stopping:
        stopping_criteria.append(EarlyStoppingCriterion(min_delta=1e-8, patience=10))
    if use_sufficient_stopping:
        stopping_criteria.append(SufficientStoppingCriterion())
    net.train(stopping_criteria=stopping_criteria,
              max_num_checks=max_num_checks,
              num_minibatches_per_check=num_minibatches_per_check,
              minibatch_size=minibatch_size, verbose=False)
    target_folder = base_dir + 'mc-data-{}/hidden-{}_parallel-{}_id-{}_time-{}/'.format(
        offset_str, config.n_hidden, config.n_parallel, config.index,
        int(start_time * 1000))
    utils.serialize(target_folder + 'net.p', net)
    utils.serialize(target_folder + 'config.p', config)
def get_properties_groups(serialized_file: str, sparql_file: str, repository: str,
                          endpoint: str, endpoint_type: str,
                          properties_dict: ResourceDictionary,
                          limit: int = 1000) -> Dict:
    """
    Return a dictionary containing the group ids for each property in the ontology
    (The group ids are determined via connected components)

    :param serialized_file: The file where the properties ResourceDictionary is serialized
    :param sparql_file: The file containing the SPARQL query
    :param repository: The repository containing the ontology
    :param endpoint: The SPARQL endpoint
    :param endpoint_type: GRAPHDB or VIRTUOSO (to change the way the endpoint is called)
    :param properties_dict: The ResourceDictionary containing the properties of the ontology
    :param limit: The sparql query limit
    :return: A dictionary containing the group ids for each property
    """
    if os.path.isfile(serialized_file):
        properties_groups = deserialize(serialized_file)
        return properties_groups
    encoding_dir = os.path.dirname(serialized_file)
    if not os.path.exists(encoding_dir):
        os.makedirs(encoding_dir)
    sub_properties_dict = {}
    get_sub_properties_query = open(sparql_file).read()
    get_sub_properties_query_template = Template(
        get_sub_properties_query + " limit $limit offset $offset ")
    for (property1, property2) in get_sparql_results(get_sub_properties_query_template,
                                                     ["property1", "property2"],
                                                     endpoint, repository,
                                                     endpoint_type, limit):
        if property2 not in sub_properties_dict:
            sub_properties_dict[property2] = []
        sub_properties_dict[property2].append(property1)
    G = nx.Graph()
    for property1 in sub_properties_dict:
        for property2 in sub_properties_dict[property1]:
            G.add_edge(property1, property2)
    for property_uri in properties_dict:
        G.add_node(property_uri)
    properties_connected_components = {}
    index = 0
    for c in nx.connected_components(G):
        for p in c:
            properties_connected_components[p] = index
        index += 1
    serialize(properties_connected_components, serialized_file)
    return properties_connected_components
def get(self, fr_id=None):
    if fr_id:
        model = db.session.query(FeatureRequest).filter_by(id=fr_id).first()
        requests = [serialize(model)] if model else []
    else:
        requests = [serialize(entry)
                    for entry in db.session.query(FeatureRequest).all()]
    return {"requests": requests}
def __configureAttributes(self) -> None:
    for key in self.spells.keys():
        self.spells[key].caster = self
    self.race.features = (serialize(self.race.features)
                          if type(self.race.features) == list
                          else self.race.features)
    for key in self.race.features.keys():
        self.race.features[key].character = self
    for key in self.classes.keys():
        self.classes[key].features = (serialize(self.classes[key].features)
                                      if type(self.classes[key].features) == list
                                      else self.classes[key].features)
        for key2 in self.classes[key].features.keys():
            self.classes[key].features[key2].character = self
def setup_data_dir(self, serialise_name='env'):
    """
    Creates data directories and serialises the environment.
    """
    self.data_dir = self.__create_test_dir_name()
    utils.set_data_dir(self.data_dir)
    if serialise_name is not None:
        utils.serialize(serialise_name, self)
    # configure file logging
    self._log_handler = logging.FileHandler(filename=utils.DATA_DIR + '/env.log',
                                            mode='w')
    self._log_handler.setLevel(logging.DEBUG)
    logging.getLogger().addHandler(self._log_handler)
def get_scientists_list(scientists_file):
    import os
    if os.path.isfile(scientists_file):
        scientists_list = deserialize(scientists_file)
        return scientists_list
    encoding_dir = os.path.dirname(scientists_file)
    if not os.path.exists(encoding_dir):
        os.makedirs(encoding_dir)
    scientists_list = get_uris_of_class("", DBPEDIA_DATA_ENDPOINT,
                                        URIS_OF_CLASS_QUERY, 'Scientist',
                                        DBPEDIA_DATA_ENDPOINT_TYPE, QUERY_LIMIT)
    serialize(scientists_list, scientists_file)
    return scientists_list
def run(self):
    sam_file = utils.deserialize(self.in_data().path, load)[0]
    utils.mkdir_if_not_exist(self.outp_path)
    with utils.cd(self.working_dir):
        cmds = ['{tool} view -bS {sam_file} > res.bam'.format(tool=self.tool_path,
                                                              sam_file=sam_file),
                '{tool} sort res.bam -o sorted.bam -O BAM'.format(tool=self.tool_path),
                '{tool} index sorted.bam'.format(tool=self.tool_path),
                # Make index for reference
                '{tool} faidx {ref_fasta}'.format(tool=self.tool_path,
                                                  ref_fasta=self.ref_fasta)]
        for cmd in cmds:
            try:
                run(cmd, shell=True, check=True)
            except CalledProcessError as e:
                # calling the exception instance (raise e('failed')) was a bug;
                # re-raise with the failing command attached instead
                raise RuntimeError('command failed: {}'.format(cmd)) from e
        utils.serialize(glob('*'), self.out_data().path, dump)
def compute_dd_results(name, sampler, n_rep=10, n_parallel=1000, **kwargs):
    # computes the results in multiple repetitions and saves them,
    # but only if the results are not already computed
    for rep in range(n_rep):
        print(f'Repetition {rep+1}/{n_rep}')
        filename = Path('data/double_descent/') / name / f'v{rep}_{n_parallel}.p'
        if utils.existsFile(filename):
            print('Results have already been computed')
            continue
        results = DoubleDescentResults(**kwargs, random_seed=rep,
                                       n_parallel=n_parallel)
        results.compute(sampler)
        utils.serialize(filename, results)
async def async_http_request(name, session: ClientSession, **kwargs) -> str:
    """
    Invokes aiohttp client session request

    :param name:
    :param session:
    :param kwargs:
    :return:
    """
    _kwargs = serialize(kwargs)
    async with session.request(timeout=DEFAULT_REST_REQUEST_TIMEOUT, **_kwargs) as resp:
        resp_data = await resp.text()
        description = HTTPCodesDescription.get_description(resp.status, **kwargs)
        logger.debug("'%s' '%s' %s %s, status: %s, description: %s"
                     "\n\tpayload: %s\n\trequest headers: %s\n\tparams: %s"
                     "\n\tresponse data: %s\n\tresponse headers: %s"
                     % (kwargs['username'], name, kwargs['url'],
                        kwargs['method'].upper(), resp.status, description,
                        kwargs.get('data'), kwargs.get('headers'),
                        kwargs.get('params'), resp_data, dict(resp.headers)))
        # TODO: replace dirty hack
        if resp.status not in list(range(200, 209)):
            raise ClientResponseError(request_info=kwargs, history='',
                                      code=resp.status)
        return resp_data
def GET(self):
    data = web.input()
    url = data['url']
    ret = []
    if url:
        ret = fetch_prices(url)
    return serialize(ret)
def add_user_to_group(self, user_id, group_id, socket):
    user = self._get_user_by_id(user_id)
    group = self._get_group_by_id(group_id)
    if not user:
        socket.send(serialize({'type': 'error', 'message': 'Invalid user ID'}))
        return
    if not group:
        group = Group(group_id)
        self.groups.append(group)
        # the format string has one placeholder; the stray user_id argument was a bug
        app.logger.info(u'Created new group with id {}'.format(group_id))
    added = group.add_user(user)
    if user.id in self.user_group_map and added:
        self.user_group_map[user.id].append(group)
    elif added:
        self.user_group_map[user.id] = [group]
    if added:
        app.logger.info(u'Added user {} to group {}'.format(user_id, group_id))
    else:
        app.logger.info(u"Tried to add user {} to group {} but they're already in there"
                        .format(user_id, group_id))
def list_group_users(self, group_id, socket):
    group = self._get_group_by_id(group_id)
    if not group:
        socket.send(serialize({
            'type': 'error',
            'message': 'Invalid group ID: {}'.format(group_id)
        }))
        return
    users = []
    for user in group.users:
        users.append({'user_id': user.id})
    socket.send(serialize({'type': 'list_group_users', 'users': users}))
def list_groups(self, socket):
    groups = []
    for group in self.groups:
        groups.append({'group_id': group.id, 'num_users': group.num_users})
    socket.send(serialize({'type': 'list_groups', 'groups': groups}))
def hash_transactions(self):
    ''' get hashed transactions by merkle tree '''
    tx_byte_list = [utils.serialize(tx) for tx in self._transaction]
    merkle_tree = MerkleTree(tx_byte_list)
    return utils.decode(binascii.hexlify(merkle_tree.root_hash))
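# Note on the method above: each transaction is serialized to bytes, the
# MerkleTree combines hashes of those byte strings up to a single root digest,
# and binascii.hexlify renders that raw digest as printable hex before decoding.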
def transfer_message(previous_signature, next_owner_public_key):
    message = {
        "previous_signature": previous_signature,
        "next_owner_public_key": next_owner_public_key
    }
    return serialize(message)
def write(self, status, message, data=None):
    output = [str(status), str(message)]
    if data is not None:
        data = utils.serialize(data)
        output.append(data.tostring())
    # 4 (ASCII) means end transmit (like newline but via a non-printable char)
    os.write(sys.stdout.fileno(), ";".join(output) + chr(4))
def reindex(self):
    self._bucket.reset()
    utxos = self._bc.find_utxo()
    for txid, outs in utxos.items():
        self._bucket.put(txid, utils.serialize(outs))
    self._bucket.save()
def send_message(address, command, data, response=False):
    message = prepare_message(command, data)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(address)
        s.sendall(serialize(message))
        if response:
            return deserialize(s.recv(5000))
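# Hypothetical usage of send_message; the address, command name, and payload
# below are made-up illustration values, while prepare_message/serialize/
# deserialize come from the surrounding module.
# reply = send_message(('127.0.0.1', 5000), 'ping', {'from': 'node-1'},
#                      response=True)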
def GET(self):
    data = web.input()
    url = data['url']
    ret = {}
    if url:
        results = db.select('product', what='name,url', where='url=$url',
                            vars={'url': url})
        if len(results):
            result = results[0]
            ret['name'], ret['url'] = result['name'], result['url']
        query = "select * from contentanalysis.analyze where text='%s'" % ret['name']
        anal = yqlquery(query)
        try:
            entities = anal['query']['results']['entities']['entity']
            for entity in entities:
                if entity['types']['type'].get('region'):
                    ret['region'] = entity['types']['type']['region']
                    break
        except (KeyError, TypeError, ValueError):
            ret['region'] = 'unknown'
        f = flipkart()
        past_prices = fetch_prices(url)
        cur_price = f.get_price(url)
        ret['price'] = {'past': past_prices, 'present': cur_price}
        # wrap map() in list() so the result serializes on Python 3 as well
        prices = list(map(lambda x: x[1], past_prices))
        price_trend = order(prices)
        variances = monthly_variance(past_prices)
        variance_trend = order(variances)
        ret['trend'] = {'price': [price_trend, prices],
                        'variance': [variance_trend, variances]}
    return serialize(ret)
def __serialize__(self, seenVals):
    o = {}
    for k, v in sorted(self.__dict__.items()):
        if k not in ["lock", "inputType"]:
            o[k] = utils.serialize(v, seenVals)
    o["inputType"] = str(self.inputType)
    return o
def get_capsule(request, capsule_id):
    try:
        cap = Capsule.objects.get(pk=capsule_id)
        return HttpResponse(serialize(cap), content_type="application/json")
    except ObjectDoesNotExist:
        return HttpResponse(json.dumps({'error': {'message': 'Capsule Does Not Exist',
                                                  'code': 404}}),
                            content_type="application/json", status=404)
def write(self, status, message, data=None):
    # if self.__conn_status:
    try:
        output = [str(status), str(message)]
        if data is not None:
            data = utils.serialize(data)
            output.append(data.tostring())
        # 4 (ASCII) means end transmit (like newline but via a non-printable char)
        send = ";".join(output) + chr(4)
        for i in send:
            self.__serial.write(i)
    except Exception as e:
        self.__logger.warning("Problems when writing to MCU, trying reconnection...")
        self.__kill_ciao()
def create_capsule(request):
    # shouldn't have to deal with authors since capsule creation can only be from
    # the user that's logged in
    # tags are just a string of comma separated values
    query_dict = json.loads(request.body)
    try:
        cap = Capsule(**query_dict)
        cap.full_clean()
        cap.save()
        cap.authors.add(request.user)
        return HttpResponse(serialize(cap), content_type="application/json")
    except ValidationError:
        # response_data was referenced without being defined; initialize it here
        response_data = {'code': 500}
        return HttpResponse(json.dumps({'data': response_data}),
                            content_type="application/json", status=500)
def create_msgid(self):
    """Create an i18n msgid from the tag contents."""
    out = StringIO()
    out.write(self.element.text)
    for element in self.element:
        name = element.node.translation_name
        if name is None:
            out.write(utils.serialize(element, omit=True))
        else:
            out.write("${%s}" % name)
        out.write(element.tail)
    msgid = out.getvalue().strip()
    msgid = ' '.join(msgid.split())
    return msgid
def update_capsule(request, capsule_id):
    query_dict = json.loads(request.body)
    try:
        cap = Capsule.objects.filter(pk=capsule_id)
        query_dict.pop('first_created')
        query_dict.pop('last_modified')
        query_dict.pop('id')
        query_dict.pop('links')
        authors = User.objects.filter(username__in=query_dict.pop('authors'))
        # will only be one iteration
        for ca in cap:
            ca.update_nosave(**query_dict)
            ca.save()
        cap = cap[0]  # can do this since cap is filtered on pk
        cap.authors.add(*authors)
        return HttpResponse(serialize(cap), content_type="application/json")
    except ObjectDoesNotExist:
        return HttpResponse(json.dumps({'error': {'message': 'Capsule Does Not Exist',
                                                  'code': 404}}),
                            content_type="application/json", status=404)
def tag(self):
    return utils.serialize(self)
def main():
    inp = sys.argv[1]
    out = sys.argv[2]
    model = cPickle.load(gzip.open(inp))
    cPickle.dump(utils.serialize(model), gzip.open(out, 'w'))
def to_text(self):
    return serialize(self.body)
def serialize(self):
    """Serialize element into clause-statements."""
    _ = []

    # i18n domain
    if self.translation_domain is not None:
        _.append(clauses.Define(
            self.symbols.domain,
            types.value(repr(self.translation_domain))))

    # variable definitions
    adding = set()
    if self.define is not None:
        # as an optimization, we only define the `default`
        # symbol, if it's present in the definition clause (as
        # string representation)
        if self.symbols.default in repr(self.define):
            default = types.value(self.symbols.default_marker_symbol)
            _.append(clauses.Assign(default, self.symbols.default))

        for declaration, expression in self.define:
            if self.symbols.remote_scope in self.stream.scope[1]:
                define = clauses.Define(
                    declaration, expression, self.symbols.remote_scope)
            else:
                define = clauses.Define(declaration, expression)

            for name in define.declaration:
                adding.add(name)

            _.append(define)

    # tag tail (deferred)
    tail = self.tail
    if self.fill_slot is None and self.translation_name is None:
        for part in reversed(tail):
            if isinstance(part, types.expression):
                _.append(clauses.Write(part, defer=True))
            else:
                _.append(clauses.Out(part, defer=True))

    # macro method
    macro = self.macro
    if macro is not None:
        if self.symbols.remote_scope in self.stream.scope[1]:
            dictionary = self.symbols.remote_scope
        else:
            dictionary = self.symbols.scope

        exclude = set((
            self.symbols.scope, self.symbols.slots)) | \
            self.stream.scope[0] | set(macro.args)

        scope = set(itertools.chain(*self.stream.scope[1:])) | set((
            self.symbols.out, self.symbols.write))

        args = tuple(macro.args) + tuple(
            "%s=%s" % (name, name) for name in scope
            if name not in exclude)

        _.append(clauses.Method(
            macro.name, args,
            decorators=macro.decorators,
            dictionary=dictionary))

    # condition
    if self.condition is not None:
        _.append(clauses.Condition(self.condition))

    # repeat
    if self.repeat is not None:
        variables, expression = self.repeat
        newline = True
        for element in self.element.walk():
            node = element.node
            if node and node.omit is False:
                break
        else:
            newline = False

        if len(variables) > 1:
            repeat = clauses.Repeat(
                variables, expression, repeatdict=False, newline=newline)
        else:
            repeat = clauses.Repeat(variables, expression, newline=newline)
        _.append(repeat)

    # assign
    if self.assign is not None:
        for declaration, expression in self.assign:
            if len(declaration) != 1:
                raise ValueError("Can only assign single variable.")
            variable = declaration[0]
            _.append(clauses.Assign(expression, variable))

    content = self.content
    omit = self.omit

    if self.define_slot:
        name = self.define_slot
        scope = set(itertools.chain(*self.stream.scope[1:]))
        # assemble arguments that we pass into the macro
        # fill-slot callback
        exclude = set((self.symbols.scope,
                       self.symbols.slots,
                       self.symbols.out,
                       self.symbols.write)).union(self.stream.scope[0])
        scope_args = tuple(variable for variable in scope
                           if variable not in exclude)

        # look up fill-slot value
        _.append(clauses.Assign(
            types.template('%%(slots)s.get(%s)' % repr(name)),
            self.symbols.tmp))

        # if slot has been filled, either use it as is (if
        # it's a string), or pass in required arguments (if
        # it's a callback function)
        _.append(clauses.Condition(
            types.template('%(tmp)s is not None'),
            (clauses.Condition(
                types.template('isinstance(%(tmp)s, basestring)'),
                (clauses.Slot(
                    types.template("%(tmp)s(%(scope)s)"),
                    scope_args),),
                finalize=True,
                invert=True),
             clauses.Else((
                 clauses.Write(types.template("%(tmp)s")),))
             )))
        _.append(clauses.Else())

    # set dynamic content flag
    dynamic = content or self.translate is not None

    # if an attribute ordering is required, setting a default
    # trivial value for each attribute will ensure that the order
    # is preserved
    attributes = utils.odict()
    if self.attribute_ordering is not None:
        for name in self.attribute_ordering:
            attributes[name] = None

    # static attributes (including those with a namespace prefix)
    # are at the bottom of the food chain
    attributes.update(self.static_attributes)
    attributes.update(self.ns_attributes)

    # dynamic attributes
    dynamic_attrs = self.dynamic_attributes or ()
    dynamic_attr_names = []

    for variables, expression in dynamic_attrs:
        if len(variables) != 1:
            raise ValueError("Tuple definitions in assignment clause "
                             "is not supported.")
        variable = variables[0]
        attributes[variable] = expression
        dynamic_attr_names.append(variable)

    # translated attributes
    translated_attributes = self.translated_attributes or ()
    for variable, msgid in translated_attributes:
        if msgid:
            if variable in dynamic_attr_names:
                raise ValueError(
                    "Message id not allowed in conjunction with "
                    "a dynamic attribute.")

            value = types.value('"%s"' % msgid)

            if variable in attributes:
                default = repr(attributes[variable])
                expression = clauses.translate_expression(value, default=default)
            else:
                expression = clauses.translate_expression(value)
        else:
            value = attributes.get(variable)
            if value is not None:
                if variable not in dynamic_attr_names:
                    value = repr(value)
                expression = clauses.translate_expression(value)
            else:
                raise ValueError("Must be either static or dynamic "
                                 "attribute when no message id "
                                 "is supplied.")

        attributes[variable] = expression

    # tag
    text = self.text
    if omit is not True:
        _.append(clauses.Attrs(self.static_attributes,
                               "_attrs_%d" % id(self.element)))

        selfclosing = not text and not dynamic and len(self.element) == 0
        tag = clauses.Tag(
            self.tag, attributes,
            expression=self.dict_attributes,
            selfclosing=selfclosing,
            cdata=self.cdata is not None,
            defaults=self.static_attributes)

        if omit:
            _.append(clauses.Condition(
                omit, [tag], finalize=False, invert=True))
        else:
            _.append(tag)

    # tag text (if we're not replacing tag body)
    if len(text) and not dynamic and not self.use_macro and not self.extend_macro:
        for part in text:
            if isinstance(part, types.expression):
                _.append(clauses.Write(part))
            else:
                _.append(clauses.Out(part))

    # dynamic content
    if content:
        msgid = self.translate
        if msgid is not None:
            if msgid:
                raise ValueError(
                    "Can't use message id with dynamic content translation.")

            _.append(clauses.Assign(content, self.symbols.tmp))
            content = clauses.translate_expression(
                types.value(self.symbols.tmp))
        else:
            value = types.value(repr(utils.serialize(self.element, omit=True)))
            _.insert(0, clauses.Assign(
                value, "%s.value = %s" % (
                    self.symbols.default_marker_symbol, self.symbols.default)))

        _.append(clauses.Write(content))

    # dynamic text
    elif self.translate is not None and \
            True in map(lambda part: isinstance(part, types.expression), text):
        if len(self.element):
            raise ValueError(
                "Can't translate dynamic text block with elements in it.")

        init_stream = types.value('_init_stream()')
        init_stream.symbol_mapping['_init_stream'] = generation.initialize_stream

        subclauses = []
        subclauses.append(clauses.Define(
            types.declaration((self.symbols.out, self.symbols.write)),
            init_stream))

        for part in text:
            if isinstance(part, types.expression):
                subclauses.append(clauses.Write(part))
            else:
                part = ' '.join(part.split())
                if part != "":
                    subclauses.append(clauses.Out(part))

        # attempt translation
        subclauses.append(clauses.Assign(
            clauses.translate_expression(
                types.value('%(out)s.getvalue()'),
                default=None),
            self.symbols.tmp))

        _.append(clauses.Group(subclauses))
        _.append(clauses.Write(types.value(self.symbols.tmp)))

    # include
    elif self.include:
        # compute macro function arguments and create argument string
        arguments = [
            "%s=%s" % (arg, arg) for arg in
            set(itertools.chain(*self.stream.scope[1:]))]

        # XInclude's are similar to METAL macros, except the macro
        # is always defined as the entire template.

        # first we compute the filename expression and write it to
        # an internal variable
        _.append(clauses.Assign(self.include, self.symbols.include))

        # call template
        _.append(clauses.Write(
            types.template(
                "%%(xincludes)s.get(%%(include)s, %s).render_xinclude(%s)" %
                (repr(self.format), ", ".join(arguments)))))

    # use or extend macro
    elif self.use_macro or self.extend_macro:
        # assign macro value to variable
        macro = self.use_macro or self.extend_macro
        _.append(clauses.Assign(macro, self.symbols.metal))

        # for each fill-slot element, create a new output stream
        # and save value in a temporary variable
        kwargs = []
        callbacks = {}

        # determine variable scope
        scope = set(itertools.chain(adding, *self.stream.scope[1:])) - \
            self.stream.scope[0]

        # we pass in all variables from the current scope (as
        # keyword arguments, to allow first use before potential
        # reassignment)
        callback_args = ", ".join(
            "%s=%s" % (arg, arg) for arg in scope
            if arg not in (self.symbols.slots, self.symbols.scope))

        macro_args = ", ".join(
            "%s=%s" % (arg, arg) for arg in scope
            if arg not in (self.symbols.slots,))

        # loop through macro fill slot elements and generate
        # callback methods; the reason why we use callbacks is
        # convenience: it's an easy fit with the compiler
        elements = [element for element in self.element.walk()
                    if element.node and element.node.fill_slot]

        for element in elements:
            # make sure we're not in a nested macro block
            parent = element.getparent()
            while parent is not self.element:
                if parent.node.use_macro or parent.node.extend_macro:
                    element = None
                    break
                parent = parent.getparent()

            if element is None:
                continue

            # determine and register callback name
            name = element.node.fill_slot
            callbacks[name] = callback = "%s_%s" % (
                self.symbols.callback, utils.normalize_slot_name(name))

            # pass in remote scope to callback method; this is
            # done because macros may add global variables to the
            # scope, which should be made available to the calling
            # template
            visitor = clauses.Visit(element.node)
            tail = element.tail
            newline = tail and '\n' in tail
            _.append(clauses.Callback(
                callback, visitor, callback_args, newline))

        # if we're extending the macro, the current slots will be
        # carried over to the macro
        extend = self.extend_macro is not None
        defines = set()
        if extend:
            for element in self.element.walk():
                if element.node is not None:
                    define_slot = element.node.define_slot
                    if define_slot is not None:
                        defines.add(define_slot)

        # format slot arguments
        slot_args = ", ".join("'%s': %s" % kwarg for kwarg in callbacks.items())

        _.append(clauses.Macro(
            types.value("{%s}" % slot_args),
            macro_args,
            extend=extend,
            extend_except=defines,
            label=macro.label))

    # translate body
    elif self.translate is not None:
        msgid = self.translate

        # subelements are either named or unnamed; if there are
        # unnamed elements, the message id must be dynamic
        named_elements = [e for e in self.element
                          if e.node.translation_name]

        unnamed_elements = [e for e in self.element
                            if not e.node.translation_name]

        if not msgid and named_elements and not unnamed_elements:
            msgid = self.create_msgid()
            elements = named_elements
        else:
            elements = self.element

        if msgid and not named_elements:
            elements = ()

        if named_elements:
            mapping = "%s_%d" % (self.symbols.mapping, id(self.element))
            _.append(clauses.Assign(types.value('{}'), mapping))
        else:
            mapping = 'None'

        if unnamed_elements or not msgid:
            # the escape target was garbled to a no-op '%'; it must be '%%'
            # to match the msgid escaping done below
            text = utils.htmlescape(self.element.text.replace('%', '%%') or "")
            _.append(clauses.Assign(types.value(repr(text)), self.symbols.msgid))

        # for each named block, create a new output stream
        # and use the value in the translation mapping dict
        for element in elements:
            init_stream = types.value('_init_stream()')
            init_stream.symbol_mapping[
                '_init_stream'] = generation.initialize_stream

            subclauses = []
            subclauses.append(clauses.Define(
                types.declaration((self.symbols.out, self.symbols.write)),
                init_stream))
            subclauses.append(clauses.Visit(element.node))

            # if the element is named, record it in the mapping
            if element in named_elements:
                name = element.node.translation_name

                subclauses.append(clauses.Assign(
                    types.template('%(out)s.getvalue()'),
                    "%s['%s']" % (mapping, name)))

                # when computing a dynamic message id, add a
                # reference to the named block
                if not msgid:
                    if not unnamed_elements:
                        subclauses.append(clauses.Assign(
                            types.value(repr("${%s}" % name)),
                            self.symbols.msgid))
                    else:
                        subclauses.append(clauses.Assign(
                            types.template(
                                '%(msgid)s + ' + repr("${%s}" % name) +
                                ' + ' + repr(element.tail)),
                            self.symbols.msgid))

            # else add it to the dynamic message id
            else:
                subclauses.append(clauses.Assign(
                    types.template('%(msgid)s + %(out)s.getvalue()'),
                    self.symbols.msgid))

            # XXX: note that this should read:
            # _.append(clauses.Group(subclauses))
            #
            # but there's a problem with multiple temporary
            # variable assignments within the same block; this is
            # just an easy work-around
            _.append(clauses.Condition(
                types.value('True'), subclauses, finalize=True))

        if msgid:
            value = types.value(repr(msgid)).replace('%', '%%')
            default = self.symbols.marker
        else:
            default = types.template('%(msgid)s')
            value = types.template("' '.join(%(msgid)s.split())")

        _.append(clauses.Assign(
            clauses.translate_expression(
                value, mapping=mapping, default=default),
            self.symbols.result))

        # write translation to output if successful, otherwise
        # fallback to default rendition;
        result = types.value(self.symbols.result)
        result.symbol_mapping[self.symbols.marker] = i18n.marker

        if msgid:
            condition = types.template('%(result)s is not %(marker)s')
            _.append(clauses.Condition(
                condition, [clauses.UnicodeWrite(result)], finalize=True))

            subclauses = []
            if self.element.text:
                subclauses.append(clauses.Out(
                    utils.htmlescape(self.element.text)))

            for element in self.element:
                name = element.node.translation_name
                if name:
                    value = types.value("%s['%s']" % (mapping, name))
                    subclauses.append(clauses.UnicodeWrite(value))

                    for part in reversed(element.node.tail):
                        if isinstance(part, types.expression):
                            subclauses.append(clauses.Write(part))
                        else:
                            subclauses.append(clauses.Out(
                                utils.htmlescape(part)))
                else:
                    subclauses.append(clauses.Visit(element.node))

            if subclauses:
                _.append(clauses.Else(subclauses))
        else:
            _.append(clauses.UnicodeWrite(result))

    return _
def get_link(request, pk):
    link = Link.objects.get(pk=pk)
    return HttpResponse(serialize(link), content_type="application/json")
def friends(request, idx):
    friends_list = Profile.objects.get(pk=idx).friends.all().select_related()
    return HttpResponse(serialize(friends_list), mimetype='application/json')
def friends_of_friends(request, idx):
    friends_list = [friend.pk for friend in
                    Profile.objects.get(pk=idx).friends.all().select_related()]
    excluded_list = friends_list[:] + [int(idx)]
    friends_of_friends_list = (Profile.objects.filter(friends__in=friends_list)
                               .exclude(id__in=excluded_list).distinct())
    return HttpResponse(serialize(friends_of_friends_list),
                        mimetype='application/json')
def __str__(self):
    return serialize(self, 'arms', 'x', 'theta')
def GET(self):
    data = web.input(query='')
    f = flipkart()
    ret = f.search(data['query'])
    return serialize(ret)
def __str__(self):
    return serialize(self, 'arms', 'x', 'ctrh', 'A', 'U', 'S', 'V', 'VT', 'users')
def capsule_view(request, cap_id):
    cap = Capsule.objects.get(pk=cap_id)
    return render(request, 'capsule_view.html', {'capsule': serialize(cap)})