def div(a, b, lv):
    e = Echo("div", lv)
    e.ask("What's the result of {0} / {1}?".format(to_string(a), to_string(b)))
    if b == 0:
        raise ZeroDivisionError
    e.answer("{0}.".format(a / b))
    return a / b
def map_(f, lat, lv):
    e = Echo("map", lv)
    e.ask("What's the result of mapping {0} to each element of {1}?".format(to_string(f), to_string(lat)))
    retval = []
    for one in lat:
        retval.append(f(one, lv))
    e.answer("It's {0}.".format(to_string(retval)))
    return retval
def is_member(a, lat, lv):
    e = Echo("member?", lv)
    e.ask("Is {0} a member of {1}?".format(to_string(a), to_string(lat)))
    for atom in lat:
        if a == atom:
            e.answer("Yes.")
            return True
    e.answer("Nope.")
    return False
def is_eq(a, b, lv):
    e = Echo("eq?", lv)
    e.ask("Is {0} eq to {1}?".format(to_string(a), to_string(b)))
    if isa(a, str) and isa(b, str):
        e.answer("Yes." if a == b else "Nope.")
        return a == b
    else:
        e.answer("No answer, because eq? only accepts two non-numeric atoms.")
        return None
def cons(x, y, lv):
    e = Echo("cons", lv)
    e.ask("What's cons of {0} and {1}?".format(to_string(x), to_string(y)))
    if isa(y, list):
        e.answer("{0}.".format(to_string([x] + y)))
        return [x] + y
    else:
        e.answer("No answer, since the second argument to cons must be a list.")
        return None
def getConfig(self):
    """Retrieves the complete config of the core.

    :return: map of `ConfigHolder`
    """
    # TODO
    return dict(
        [(section,
          ConfigHolder(section, data.name, data.description, data.long_desc,
                       [ConfigItem(option, d.name, d.description, d.type,
                                   to_string(d.default),
                                   to_string(self.core.config.get(section, option)))
                        for option, d in data.config.iteritems()]))
         for section, data in self.core.config.getBaseSections()])
def cdr(x, lv):
    e = Echo("cdr", lv)
    e.ask("What's cdr of {0}?".format(to_string(x)))
    if isa(x, str):
        e.answer("No answer, since you cannot ask for the cdr of an atom.")
        return None
    elif isa(x, list):
        if x == []:
            e.answer("No answer, since you cannot ask for the cdr of the empty list.")
            return None
        else:
            e.answer("{0}.".format(to_string(x[1:])))
            return x[1:]
    else:
        e.answer("No answer, because cdr is only for non-empty lists.")
        return None
def listen(self, log, noprint=True):
    if not len(log.topics) or log.topics[0] not in self.event_data:
        return
    types = self.event_data[log.topics[0]]['types']
    name = self.event_data[log.topics[0]]['name']
    names = self.event_data[log.topics[0]]['names']
    indexed = self.event_data[log.topics[0]]['indexed']
    indexed_types = [types[i] for i in range(len(types)) if indexed[i]]
    unindexed_types = [types[i] for i in range(len(types)) if not indexed[i]]
    # print('listen', encode_hex(log.data), log.topics)
    deserialized_args = decode_abi(unindexed_types, log.data)
    o = {}
    c1, c2 = 0, 0
    for i in range(len(names)):
        if indexed[i]:
            topic_bytes = utils.zpad(utils.encode_int(log.topics[c1 + 1]), 32)
            o[names[i]] = decode_single(process_type(indexed_types[c1]),
                                        topic_bytes)
            c1 += 1
        else:
            o[names[i]] = deserialized_args[c2]
            c2 += 1
    o["_event_type"] = utils.to_string(name)
    if not noprint:
        print(o)
    return o
def analyze_macro_application(astnode):
    exp = astnode.exp
    macro, macro_args = exp[0].exp, astnode.get_exp()
    expanded = macro_expand(macro, macro_args)
    tostring = str(to_string(expanded))
    parsed = analyze(list(parse(tostring))[-1])
    return CodeObject(astnode, lambda env: parsed.exec_(env))
def car(x, lv, verbose=False):
    e = Echo("car", lv)
    e.ask("What's car of {0}?".format(to_string(x)))
    if isa(x, str):
        e.answer("No answer, because you cannot ask for the car of an atom.")
        return None
    elif isa(x, list):
        if x == []:
            e.answer("No answer, because you cannot ask for the car of the empty list.")
            return None
        else:
            e.answer("{0}.".format(to_string(x[0])))
            return x[0]
    else:
        e.answer("No answer, because car is only for non-empty lists.")
        return None
def getInfo(self, plugin):
    info = {}
    if plugin in self.plugins and self.plugins[plugin].info:
        info = dict([(x, to_string(y))
                     for x, y in self.plugins[plugin].info.iteritems()])
    return info
def analyze(astnode, toplevel=False):
    """Analyze an AstNode and return a CodeObject.

    'The result of calling analyze is the execution procedure to be applied
    to the environment [...] the procedures to which we dispatch perform only
    analysis, not full evaluation' [ABELSON et al., 1996]

    @param AstNode astnode
    @param bool toplevel
    @returns CodeObject
    """
    try:
        exp = astnode.exp
        # Atomic data types and symbols
        if self_eval(exp):
            return CodeObject(astnode, lambda env: exp)
        elif isa(exp, Symbol):
            return CodeObject(astnode, lambda env: env.find(exp)[exp])
        # Compound expressions
        elif isa(exp, list):
            if is_dict_literal(exp):
                return analyze_dict_literal(astnode)
            elif is_if(exp):
                return analyze_if(astnode)
            elif is_begin(exp):
                return analyze_begin(astnode, toplevel)
            elif is_assignment(exp):
                return analyze_assignment(astnode)
            elif is_let(exp):
                return analyze_let(astnode)
            elif is_lambda_shorthand(exp):
                return analyze_lambda_shorthand(astnode)
            elif is_vardef(exp):
                return analyze_vardef(astnode)
            elif is_procdef(exp):
                return analyze_procdef(astnode)
            elif is_macrodef(exp):
                return analyze_macrodef(astnode, toplevel)
            elif is_macro_application(exp):
                return analyze_macro_application(astnode)
            elif is_quoted(exp):
                return analyze_quotation(astnode)
            elif is_builtin_proc_application(exp):
                return analyze_builtin_proc_application(astnode)
            elif is_attribute_access(exp):
                return analyze_attribute_access(astnode)
            else:
                # If we haven't been able to determine an expression type so
                # far, we assume that it is a procedure call.
                return analyze_procedure_application(astnode)
        else:
            raise TypeError("Unknown expression type: %s" % to_string(exp))
    except Exception as e:
        add_exc_info(astnode, e)
        raise e
def is_number(s, lv):
    e = Echo("number?", lv)
    e.ask("Is {0} a number?".format(to_string(s)))
    if isa(s, int) or isa(s, float):
        e.answer("Yes.")
        return True
    else:
        e.answer("Nope.")
        return False
def is_null(s, lv):
    e = Echo("null?", lv)
    e.ask("Is {0} an empty list?".format(to_string(s)))
    if isa(s, list):
        e.answer("Yes." if s == [] else "Nope.")
        return s == []
    else:
        e.answer("No answer, since you can only ask null? of a list.")
        return None
def getConfigValue(self, section, option):
    """Retrieve config value.

    :param section: name of category, or plugin
    :param option: config option
    :return: config value as string
    """
    value = self.core.config.get(section, option)
    return to_string(value)
def getAllInfo(self):
    """Returns info stored by addon plugins."""
    info = {}
    for name, plugin in self.plugins.iteritems():
        if plugin.info:
            # copy and convert values to str
            info[name] = dict(
                [(x, to_string(y)) for x, y in plugin.info.iteritems()])
    return info
def is_zero(n, lv):
    e = Echo("zero?", lv)
    e.ask("Is {0} zero?".format(to_string(n)))
    if isa(n, int) or isa(n, float):
        e.answer("Yes." if n == 0 else "Nope.")
        return n == 0
    else:
        e.answer("No answer, since you can only ask zero? of a number.")
        return None
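# The Echo-traced helpers above (div, map_, member?, eq?, cons, cdr, car,
# number?, null?, zero?) rely on three names this corpus never defines:
# Echo, isa and to_string. Below is a minimal sketch of plausible stand-ins
# for experimenting with them locally -- assumptions, not the original code.
isa = isinstance

def to_string(x):
    return str(x)

class Echo(object):
    def __init__(self, name, lv):
        self.prefix = "  " * lv + name  # indent by recursion level lv

    def ask(self, msg):
        print("{0} asks: {1}".format(self.prefix, msg))

    def answer(self, msg):
        print("{0} answers: {1}".format(self.prefix, msg))

# e.g. cons(1, [2, 3], 0) prints the question/answer pair and returns [1, 2, 3]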
def gen_report(true_pts, pred_pts, pickle_name, params):
    tot_error = []
    for true_pt, pred_pt in zip(true_pts, pred_pts):
        tot_error.append(distance(pred_pt, true_pt))
    f_report = open('report.txt', 'a')
    report_content = to_string(params)
    # f_report.write(pickle_name + '\n')
    # f_report.write('Total Test size\t%d\n' % len(tot_error))
    report_content.append(str(len(tot_error)))
    tot_error = sorted(tot_error)
    report_content += to_string([np.max(tot_error),
                                 np.min(tot_error),
                                 np.mean(tot_error),
                                 np.median(tot_error),
                                 tot_error[int(len(tot_error) * 0.67)],
                                 tot_error[int(len(tot_error) * 0.8)],
                                 tot_error[int(len(tot_error) * 0.9)]])
    f_report.write(','.join(report_content) + '\n')
    f_report.flush()
def enc(typ, arg):
    base, sub, arrlist = typ
    sz = get_size(typ)
    # Encode dynamic-sized strings as <len(str)> + <str>
    if base in ('string', 'bytes') and not sub:
        assert isinstance(arg, (str, bytes, utils.unicode)), \
            "Expecting a string"
        return enc(lentyp, len(arg)) + \
            utils.to_string(arg) + \
            b'\x00' * (utils.ceil32(len(arg)) - len(arg))
    # Encode dynamic-sized lists via the head/tail mechanism described in
    # https://github.com/ethereum/wiki/wiki/Proposal-for-new-ABI-value-encoding
    elif sz is None:
        assert isinstance(arg, list), \
            "Expecting a list argument"
        subtyp = base, sub, arrlist[:-1]
        subsize = get_size(subtyp)
        myhead, mytail = b'', b''
        if arrlist[-1] == []:
            myhead += enc(lentyp, len(arg))
        else:
            assert len(arg) == arrlist[-1][0], \
                "Wrong array size: found %d, expecting %d" % \
                (len(arg), arrlist[-1][0])
        for i in range(len(arg)):
            if subsize is None:
                myhead += enc(lentyp, 32 * len(arg) + len(mytail))
                mytail += enc(subtyp, arg[i])
            else:
                myhead += enc(subtyp, arg[i])
        return myhead + mytail
    # Encode static-sized lists via sequential packing
    else:
        if arrlist == []:
            return utils.to_string(encode_single(typ, arg))
        else:
            subtyp = base, sub, arrlist[:-1]
            o = b''
            for x in arg:
                o += enc(subtyp, x)
            return o
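# A self-contained illustration of the simplest dynamic case enc() handles:
# a uint256[] encoded as a 32-byte length word followed by the packed
# elements. Helper names below are illustrative, not from the library.
def encode_uint256(n):
    return n.to_bytes(32, 'big')  # Python 3; the library also targets Python 2

def encode_uint256_list(values):
    head = encode_uint256(len(values))                  # the <len(arg)> prefix
    tail = b''.join(encode_uint256(v) for v in values)  # sequential packing
    return head + tail

assert len(encode_uint256_list([1, 2, 3])) == 4 * 32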
def __setattr__(self, attr, value):
    """Set a node attribute."""
    if attr.startswith('_') or \
            isinstance(getattr(self.__class__, attr, None), property):
        object.__setattr__(self, attr, value)
    else:
        data = self.normalized_data()
        if value is None:
            value = []
        else:
            value = utils.to_string(value)
        data[attr] = value
def build_file_sizes(self, copy_dict):
    """Called by self.parse().

    Build the value of the 'min_filesize' and 'max_filesize' options and
    store them in the options dictionary.

    Args:
        copy_dict (dict): Copy of the original options dictionary.
    """
    if copy_dict['min_filesize']:
        copy_dict['min_filesize'] = \
            utils.to_string(copy_dict['min_filesize']) + \
            copy_dict['min_filesize_unit']
    if copy_dict['max_filesize']:
        copy_dict['max_filesize'] = \
            utils.to_string(copy_dict['max_filesize']) + \
            copy_dict['max_filesize_unit']
def user(user_id=None):
    if request.method == 'POST':
        app.logger.debug('Args: {}'.format(request.args))
        first = to_string(request.args.get('firstName', ''))
        last = to_string(request.args.get('lastName', ''))
        phone = to_string(request.args.get('phoneNum', ''))
        if not first or not last or not phone:
            return json_response(
                {'response': 'Either firstName or lastName or phoneNum missing in params'},
                400)
        phone = sanitize_number(phone)
        user_query = User.query_by_phone(phone=phone)
        if not user_query:
            user = User.create(first, last, phone)
            return json_response({'response': 'Created', 'id': user.id})
        else:
            return json_response(
                {'response': 'User with above phone is already registered',
                 'id': user_query.id},
                status_code=400)
    elif request.method == 'GET':
        if user_id:
            user = User.query_user_by(user_id)
            app.logger.debug('user: {}'.format(user))
            return jsonify(firstname=user.first, lastname=user.last,
                           phone=user.phone, id=user.id)
        else:
            response_list = list()
            users = User.query_user_by()
            app.logger.debug('user: {}'.format(users))
            if users:
                for user in users:
                    response_list.append({
                        'first': user.first,
                        'last': user.last,
                        'phone': user.phone,
                        'id': user.id
                    })
            return json_response(response_list)
def call(self, plugin, func, arguments):
    """Calls a service (a method in an addon plugin).

    :raises: ServiceDoesNotExists, when it is not available
    :raises: ServiceException, when an exception was raised
    """
    if not self.hasService(plugin, func):
        raise ServiceDoesNotExists(plugin, func)
    try:
        ret = self.core.addonManager.callRPC(plugin, func, arguments)
        return to_string(ret)
    except Exception, e:
        raise ServiceException(e.message)
def repo_collaborators(reponame):
    """View list of members and access level within certain repos."""
    try:
        table_data = [
            ['User', 'Admin', 'Push', 'Pull'],
        ]
        collabs = get_repo_collabs(reponame)
        for collab in collabs:
            admin = collab.get('permissions', {}).get('admin', False)
            push = collab.get('permissions', {}).get('push', False)
            pull = collab.get('permissions', {}).get('pull', False)
            table_data.append([
                collab.get('login'),
                to_string(admin),
                to_string(push),
                to_string(pull)
            ])
        table = AsciiTable(table_data, reponame)
        print(table.table)
        print("\n")
    except Exception:
        # (u"{} not found".format(url))
        print("Repo does not exist with Docker Github")
def restaurant(restaurant_id=None):
    app.logger.debug('Args: {}'.format(request.args))
    restaurant_name = to_string(request.args.get('name', ''))
    category = to_string(request.args.get('category', ''))
    if request.method == 'POST':
        if not restaurant_name or not category:
            return json_response(
                {'response': 'Either name or category is missing in params'},
                400)
        result = Restaurant.create(restaurant_name, category)
        return json_response({'response': 'Created', 'id': result.id})
    elif request.method == 'PUT':
        # restaurant_id = int(request.args.get('id', ''))
        app.logger.debug('Restaurant_id: {}'.format(restaurant_id))
        if not restaurant_name and not category:
            return json_response(
                {'response': 'Either name or category is missing in params'},
                400)
        result = Restaurant.update(restaurant_id, restaurant_name, category)
        app.logger.debug('result: {}'.format(result))
        return json_response({'response': 'Updated', 'id': result.id})
    else:
        return json_response({'response': 'BAD Request'}, 400)
def move(self, from_path, to_path, root='app_folder'):
    if root not in ('app_folder', 'kuaipan'):
        root = 'app_folder'
    if type(from_path) not in (str, unicode) or not from_path:
        raise OpenAPIArgumentError
    if type(to_path) not in (str, unicode) or not to_path:
        raise OpenAPIArgumentError
    parameters = self._oauth_parameter()
    parameters['root'] = root
    parameters['from_path'] = to_string(from_path)
    parameters['to_path'] = to_string(to_path)
    base_url = FILEOPS_MOVE_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        return True
    else:
        raise OpenAPIHTTPError(status, rf.read())
def upload_file(self, path, overwrite=True, root='app_folder'):
    if root not in ('app_folder', 'kuaipan'):
        root = 'app_folder'
    if type(path) not in (str, unicode) or not path:
        raise OpenAPIArgumentError
    parameters = self._oauth_parameter()
    parameters['overwrite'] = overwrite
    parameters['root'] = root
    parameters['path'] = to_string(path)
    base_url = self.upload_locate() + '/1/fileops/upload_file'
    s = self._sig_request_url(base_url, parameters, 'post')
    return s
def reduce_key(key, value, minimize_columns=False):
    """Flattens the value of the key and appends the keys in the value
    dictionary to the parent key.
    """
    reduced_item = {}
    if type(value) is list:
        # Reduction condition 1: the value of the key is a list.
        if minimize_columns and is_simple_list(value):
            # If the value is a simple list, i.e. a list of strings or
            # integers, group all the values under a single column.
            reduced_item[repr_key(key)] = (to_string(value), True)
        else:
            # Create a new column for each index in the list.
            for i, sub_item in enumerate(value):
                reduced_item.update(
                    reduce_key("%s" % (repr_compound_list(key, i)),
                               sub_item,
                               minimize_columns=minimize_columns))
    elif type(value) is dict:
        # Reduction condition 2: the value of the key is a dictionary.
        for sub_key, sub_value in value.items():
            sub_reduced_items = reduce_key(sub_key, sub_value,
                                           minimize_columns=minimize_columns)
            for _key, _value in sub_reduced_items.items():
                reduced_item["%s.%s" % (key, repr_key(_key))] = _value
    else:
        reduced_item[to_string(key)] = (to_string(value), False)
    return reduced_item
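# reduce_key leans on four helpers that never appear in this corpus
# (repr_key, repr_compound_list, is_simple_list, to_string). With the assumed
# stand-ins below, a nested record flattens as shown; the real helpers may
# format keys differently.
repr_key = to_string = str
is_simple_list = lambda l: all(isinstance(x, (str, int)) for x in l)
repr_compound_list = lambda key, i: "%s[%d]" % (key, i)

flat = reduce_key("user", {"name": "Ada", "tags": ["a", "b"]},
                  minimize_columns=True)
# -> {'user.name': ('Ada', False), 'user.tags': ("['a', 'b']", True)}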
def upload_locate(self, source_ip=None):
    parameters = self._oauth_parameter()
    if source_ip:
        parameters['source_ip'] = source_ip
    base_url = FILEOPS_UPLOAD_LOCATE_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        d = json.loads(rf.read())
        return to_string(d[u'url'])
    else:
        raise OpenAPIHTTPError(status, rf.read())
def get_alternate_names_wd(entities_id):
    sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
    sparql.setQuery("""
        SELECT DISTINCT ?p ?alias
        WHERE {
            VALUES ?p { """ + " ".join(entities_id) + """ } .
            ?p skos:altLabel ?alias
            FILTER (LANG (?alias) = "en")
        }""")
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return [(to_wd_identifier(r['p']), to_string(r['alias']))
            for r in results['results']['bindings']]
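# Both SPARQL helpers index each result binding as r['p'] and r['alias'].
# In the SPARQL 1.1 JSON results format those are dicts such as
# {'type': 'uri', 'value': 'http://www.wikidata.org/entity/Q5592'}, so the
# projection helpers plausibly look like this (assumptions, not repo code):
def to_string(binding):
    return binding['value']

def to_wd_identifier(binding):
    return binding['value'].rsplit('/', 1)[-1]  # e.g. 'Q5592'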
def __init__(self, uid=None, dn=None, conn=None, attrs=None):
    object.__init__(self)
    if conn and not self._conn:
        self._conn = conn
    self._dn = None
    self._update_dn(uid=uid, dn=dn)
    if attrs:
        self._data = self._defaults.copy()
        for k, v in attrs.items():
            if isinstance(v, (list, tuple)) and len(v) == 1:
                v = v[0]
            self._data[k] = utils.to_string(v)
    elif attrs is not None:
        self._data = attrs
    else:
        self._data = None
def __init__(self, stream, line_i=0, fil='', do_comments=True):
    if isinstance(stream, (str, unicode)):
        stream = io.StringIO(to_string(stream))
    self.stream = stream
    self.line_i = line_i
    comments_internal = ('comment' if do_comments else 'ignore')
    self.start_end = [BeginEnd('[', ']', 'aref'),
                      BeginEnd('(', ')', 'call'),
                      BeginEnd('{', '}', 'seq'),
                      BeginEnd(';', '\n', 'comment',
                               internal=comments_internal,
                               ignore_alt_end=True,
                               ignore_as_alt_end=True),
                      BeginEnd('"', '"', 'str', internal='str')]
    self.n_max = 16
    self.fil = fil  # Current file.
def get_alternate_names_ulan(entities_id):
    sparql = SPARQLWrapper("http://vocab.getty.edu/sparql")
    sparql.setQuery("""
        SELECT DISTINCT ?p ?alias
        WHERE {
            VALUES ?p { """ + " ".join(entities_id) + """ } .
            ?p (xl:altLabel|xl:prefLabel)/gvp:term ?alias .
            # VALUES ?l {xl:altLabel xl:prefLabel}
        }""")
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    return [(to_ulan_identifier(r['p']), to_string(r['alias']))
            for r in results['results']['bindings']]
def shares(self, path, root='app_folder'):
    parameters = self._oauth_parameter()
    if root not in ('app_folder', 'kuaipan'):
        root = 'app_folder'
    path = quote(path)
    base_url = SHARE_BASE_URL % (root, path)
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        d = json.loads(rf.read())
        return to_string(d[u'url'])
    else:
        raise OpenAPIHTTPError(status, rf.read())
def append(self, node, save=True):
    """Append a subnode."""
    if self.dn:
        if node.rdn:
            value = utils.to_string(getattr(node, node.rdn))
            node._dn = '%s=%s,%s' % (node.rdn, value, self.dn)
            node.bind(self._conn)
            if save:
                try:
                    self._conn.get_dn(node.dn)
                except ValueError:
                    self._conn.add(node)
                else:
                    self._conn.save(node)
        else:
            raise ValueError('%r needs a _rdn attr' % node)
    else:
        raise AttributeError('%r is not bound to a connection' % self)
def fletcher_recieve(data):
    """
    m: the part of result_msg that is in fact the message
    c1: the part of result_msg that holds the first mod
    c2: the part of result_msg that holds the second mod
    """
    m, c1, c2 = get_params(data)
    # c1_n: the new c1, calculated for the message we receive
    # c2_n: the new c2, calculated for the message we receive
    c1_n, c2_n = fletcher_checksum(m)
    print("C1: ", to_int(c1), to_int(c1_n))  # only for debugging
    print("C2: ", to_int(c2), to_int(c2_n))  # only for debugging
    # If they don't coincide, there is a corruption somewhere
    if (to_int(c1_n) != to_int(c1) and to_int(c2_n) != to_int(c2)):
        print("ERROR, message possibly corrupted")
        print("MESSAGE: ", to_string(m))  # prints the message
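# fletcher_checksum itself is not shown in this corpus; it operates on the
# bitarray message m (see MOD_BITARRAY in utils). For reference, the classic
# byte-wise Fletcher-16 it mirrors is sketched below -- the standard
# algorithm, not the repository's bitarray implementation.
def fletcher16(data):
    c1 = c2 = 0
    for byte in data:
        c1 = (c1 + byte) % 255
        c2 = (c2 + c1) % 255
    return c1, c2

assert fletcher16(b"abcde") == (0xF0, 0xC8)  # the 0xC8F0 test vector, as (c1, c2)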
def __init__(self, stream, line_i=0, start_end=None, do_comments=True,
             earliest_macro={}, fil='',
             handle=lambda a, b: str(a).split()):
    if isinstance(stream, (str, unicode)):
        stream = io.StringIO(to_string(stream))
    self.stream = stream
    self.line_i = line_i
    self.start_end = start_end
    if start_end is None:
        comment_internal = ('comment' if do_comments else 'ignore')
        self.start_end = [BeginEnd('(', ')', 'call'),
                          BeginEnd(';', '\n', 'comment',
                                   internal=comment_internal,
                                   ignore_alt_end=True,
                                   ignore_as_alt_end=True),
                          BeginEnd('"', '"', 'str', internal='str')]
    self.n_max = 16
    self.fil = fil  # Current file.
    self.handle = handle
def get_wd_entities():
    sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
    sparql.setQuery("""
        SELECT ?p ?pLabel
        WHERE {
            {
                SELECT ?p WHERE {
                    ?p wdt:P106 ?class .
                    # Painter, engraver, sculptor, architect
                    FILTER (?class IN (wd:Q1028181, wd:Q1281618, wd:Q329439, wd:Q42973)) .
                    # BIND (wd:Q5592 AS ?p) .
                    ?p p:P569/psv:P569 ?birth_date_node .
                    ?birth_date_node wikibase:timeValue ?birth_date .
                    FILTER (year(?birth_date) < 1900)
                } GROUP BY ?p
            } .
            # ?p skos:altLabel ?alias FILTER (LANG (?alias) = "en")
            SERVICE wikibase:label { bd:serviceParam wikibase:language "en,it,fr,de" }
        }""")
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    entities = {
        to_wd_identifier(r['p']): {'label': to_string(r['pLabel'])}
        for r in results['results']['bindings']
    }
    batch_size = 400
    all_alternate_namings = defaultdict(list)
    all_ids = list(entities.keys())
    for i in tqdm(range(0, len(entities) // batch_size + 1)):
        id_aliases_pairs = get_alternate_names_wd(
            all_ids[i * batch_size:(i + 1) * batch_size])
        if len(id_aliases_pairs) == 0:
            break
        for _id, alternate_alias in id_aliases_pairs:
            all_alternate_namings[_id].append(alternate_alias)
    for _id in all_ids:
        entities[_id]['alternateLabels'] = list(set(all_alternate_namings[_id]))
    return entities
def accessToken(self):
    parameters = self._oauth_parameter()
    base_url = ACCESS_TOKEN_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        d = json.loads(rf.read())
        for k in (u"oauth_token", u"oauth_token_secret", u"user_id",
                  u"charged_dir"):
            if d.has_key(k):
                v = d.get(k)
                setattr(self, to_string(k), safe_value(v))
    else:
        raise OpenAPIHTTPError(status, rf.read())
def requestToken(self, callback=None):
    parameters = self._oauth_parameter(has_token=False)
    if callback:
        parameters['oauth_callback'] = callback
    base_url = REQUEST_TOKEN_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        d = json.loads(rf.read())
        for k in (u"oauth_token", u"oauth_token_secret",
                  u"oauth_callback_confirmed"):
            if d.has_key(k):
                v = d.get(k)
                setattr(self, to_string(k), safe_value(v))
    else:
        raise OpenAPIHTTPError(status, rf.read())
def account_info(self):
    parameters = self._oauth_parameter()
    base_url = ACCOUNT_INFO_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        d = json.loads(rf.read())
        for k in (u"max_file_size", u"user_name", u"user_id",
                  u"quota_total", u"quota_used", u"quota_recycled"):
            if d.has_key(k):
                v = d.get(k)
                setattr(self, to_string(k), safe_value(v))
    else:
        raise OpenAPIHTTPError(status, rf.read())
def create_folder(self, path, root='app_folder'):
    if root not in ('app_folder', 'kuaipan'):
        root = 'app_folder'
    if type(path) not in (str, unicode) or not path:
        raise OpenAPIArgumentError
    parameters = self._oauth_parameter()
    parameters['root'] = root
    parameters['path'] = to_string(path)
    base_url = FILEOPS_CREATE_BASE_URL
    s = self._sig_request_url(base_url, parameters)
    rf = urllib.urlopen(s)
    status = rf.getcode()
    if status == 200:
        js = rf.read()
        return js
    else:
        raise OpenAPIHTTPError(status, rf.read())
def decint(n, signed=False):
    if isinstance(n, str):
        n = utils.to_string(n)
    if is_numeric(n):
        min, max = (-TT255, TT255 - 1) if signed else (0, TT256 - 1)
        if n > max or n < min:
            raise EncodingError("Number out of range: %r" % n)
        return n
    elif is_string(n):
        if len(n) == 40:
            n = decode_hex(n)
        if len(n) > 32:
            raise EncodingError("String too long: %r" % n)
        i = big_endian_to_int(n)
        return (i - TT256) if signed and i >= TT255 else i
    elif n is True:
        return 1
    elif n is False or n is None:
        return 0
    else:
        raise EncodingError("Cannot encode integer: %r" % n)
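# A sketch of the two's-complement interpretation decint applies to decoded
# byte strings. In the ethereum utils, TT255 and TT256 are 2**255 and 2**256.
TT255, TT256 = 2 ** 255, 2 ** 256

def interpret_signed(i):
    # mirrors: (i - TT256) if signed and i >= TT255 else i
    return i - TT256 if i >= TT255 else i

assert interpret_signed(TT256 - 1) == -1  # 0xff...ff decodes to -1 when signed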
def val(batch_manager, model, discr, w_discr, logger=None, epoch=0):
    # Initialize meters.
    data_time = utils.AverageMeter()
    net_time = utils.AverageMeter()
    loss_meter_model = utils.AverageMeter()
    loss_meter_discr = utils.AverageMeter()
    eval_meter_model = utils.AverageMeter()
    eval_meter_discr = utils.AverageMeter()

    # Do the job.
    loader = batch_manager.loader
    model.model.eval()
    discr.model.eval()
    t0 = time.time()
    for i, (inputs_model, targets_model, _) in enumerate(loader):

        # Set variables.
        targets_model = targets_model.cuda(async=True)

        # Measure data time.
        data_time.update(time.time() - t0)
        t0 = time.time()

        # Model forward.
        outputs_model, inputs_discr = model.model(
            Variable(inputs_model, volatile=True))

        # Make discriminator labels.
        targets_discr = outputs_model.data.max(1)[1].eq(targets_model)
        targets_discr = (1 - targets_discr).unsqueeze(1).float()

        # Discriminator forward.
        outputs_discr = discr.model(inputs_discr.detach())

        # Discriminator loss forward.
        loss_discr = discr.criterion(outputs_discr,
                                     Variable(targets_discr, volatile=True))
        eval_discr = batch_manager.evaluator_discr(outputs_discr, targets_discr)

        # Model loss forward.
        loss_model = model.criterion(outputs_model,
                                     Variable(targets_model, volatile=True)) \
            + w_discr * discr.criterion(
                discr.model(inputs_discr),
                Variable(targets_discr.fill_(0), volatile=True))
        eval_model = batch_manager.evaluator_model(outputs_model, targets_model)

        # Accumulate statistics.
        loss_meter_model.update(loss_model.data[0], targets_model.size(0))
        loss_meter_discr.update(loss_discr.data[0], targets_discr.size(0))
        eval_meter_model.update(eval_model, targets_model.size(0))
        eval_meter_discr.update(eval_discr, targets_discr.size(0))

        # Measure network time.
        net_time.update(time.time() - t0)
        t0 = time.time()

        # Print iteration.
        print('Epoch {0} Batch {1}/{2} '
              'T-data {data_time.val:.2f} ({data_time.avg:.2f}) '
              'T-net {net_time.val:.2f} ({net_time.avg:.2f}) '
              'M-loss {loss_model.val:.2f} ({loss_model.avg:.2f}) '
              'M-eval {eval_model_val} ({eval_model_avg}) '
              'D-loss {loss_discr.val:.2f} ({loss_discr.avg:.2f}) '
              'D-eval {eval_discr_val} ({eval_discr_avg})'.format(
                  epoch, i + 1, len(loader),
                  data_time=data_time,
                  net_time=net_time,
                  loss_model=loss_meter_model,
                  eval_model_val=utils.to_string(eval_meter_model.val),
                  eval_model_avg=utils.to_string(eval_meter_model.avg),
                  loss_discr=loss_meter_discr,
                  eval_discr_val=utils.to_string(eval_meter_discr.val),
                  eval_discr_avg=utils.to_string(eval_meter_discr.avg)))

    # Summarize results.
    perform = eval_meter_model.avg
    if not isinstance(perform, Iterable):
        perform = [perform]
    if logger is not None:
        logger.write([epoch, loss_meter_model.avg] + perform
                     + [loss_meter_discr.avg, eval_meter_discr.avg])
    print('Summary of validation at epoch {epoch:d}.\n'
          '  Number of samples: {num_sample:d}\n'
          '  Number of batches: {num_batch:d}\n'
          '  Total time for data: {data_time:.2f} sec\n'
          '  Total time for network: {net_time:.2f} sec\n'
          '  Total time: {total_time:.2f} sec\n'
          '  Average model loss: {avg_loss_model:.4f}\n'
          '  Average model performance: {avg_perf_model}\n'
          '  Average discriminator loss: {avg_loss_discr:.4f}\n'
          '  Average discriminator performance: {avg_perf_discr}'.format(
              epoch=epoch,
              num_sample=loss_meter_model.count,
              num_batch=len(loader),
              data_time=data_time.sum,
              net_time=net_time.sum,
              total_time=data_time.sum + net_time.sum,
              avg_loss_model=loss_meter_model.avg,
              avg_perf_model=utils.to_string(eval_meter_model.avg, '%.4f'),
              avg_loss_discr=loss_meter_discr.avg,
              avg_perf_discr=utils.to_string(eval_meter_discr.avg, '%.4f')))
    return perform[0]
def get(self, key):
    return self._get(self.root_node,
                     bin_to_nibbles(utils.to_string(key)))
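# get() keys the trie by nibbles: bin_to_nibbles splits each byte of the
# stringified key into a high and a low 4-bit value. A sketch of that
# expansion, assuming Python 3 bytes as input.
def bin_to_nibbles(s):
    return [x for b in s for x in (b >> 4, b & 0x0F)]

assert bin_to_nibbles(b'\x12\x34') == [1, 2, 3, 4]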
def __set__(self, instance, value):
    data = instance.normalized_data()
    if value is None:
        data[self.name] = []
    else:
        data[self.name] = utils.to_string(value)
def main():
    parser = argparse.ArgumentParser(description='Summarize training results')
    parser.add_argument('--root',
                        default='/home/dgyoo/workspace/dataout/dl-frame',
                        metavar='DIR', type=str,
                        help='root to the log files')
    parser.add_argument('--target-index', default=2, metavar='N', type=int,
                        help='target evaluation metric index in the log files '
                             '(a column index of a log file)')
    parser.add_argument('--decimal-places', default=2, metavar='N', type=int,
                        help='decimal places')
    args = parser.parse_args()

    # Find log files and directories.
    log_dirs = []
    for root, _, files in os.walk(args.root):
        for f in files:
            if f.endswith('.log'):
                log_dirs.append(root)
    log_dirs = [log_dir.replace(',', '~') for log_dir in log_dirs]
    log_dirs = sorted(list(set(log_dirs)))
    log_dirs = [log_dir.replace('~', ',') for log_dir in log_dirs]

    # Find the base directory.
    splits = log_dirs[0].split(os.sep)
    for i in range(len(splits)):
        if not all([os.path.join(*splits[:i + 1]) in log_dir
                    for log_dir in log_dirs]):
            break
    base_dir = os.path.join(*splits[:i])
    base_dir = log_dirs[0][:log_dirs[0].find(base_dir) + len(base_dir) + 1]

    # Do the job.
    skips = []
    form = '%.{:d}f'.format(args.decimal_places)
    print('Summarizing results in {}'.format(base_dir))
    for log_dir in log_dirs:
        logger_train = utils.Logger(os.path.join(log_dir, 'train.log'))
        logger_val = utils.Logger(os.path.join(log_dir, 'val.log'))
        if len(logger_train) != len(logger_val):
            skips.append(log_dir)
            continue
        log_train = logger_train.read()
        log_val = logger_val.read()
        targets = [log[args.target_index] for log in log_val]
        index = targets.index(max(targets))
        print('E {:02d} | TL {} TE {} | VL {} VE {} | {}'.format(
            int(log_train[index][0]),
            utils.to_string(log_train[index][1], form),
            utils.to_string(log_train[index][2:], form),
            utils.to_string(log_val[index][1], form),
            utils.to_string(log_val[index][2:], form),
            log_dir[len(base_dir):]))
    for skip in skips:
        print('Skip {} since len(train) != len(val)'.format(
            skip[len(base_dir):]))
def __init__(self, url, options):
    self.url = url
    self.options = options
    self.object_id = hash(url + to_string(options))
    self.reset()
def __repr__(self):
    return to_string(lr=self.get_lr(),
                     t_max=self.T_max,
                     eta_min=self.eta_min,
                     period_multiplier=self.period_multiplier,
                     last_epoch=self.last_epoch)
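# Unlike most snippets here, this __repr__ passes keyword arguments to
# to_string, implying a formatter that renders name=value pairs. A minimal
# sketch of such a helper -- an assumption about its interface, since the
# real one is not in this corpus.
def to_string(**kwargs):
    return ', '.join('%s=%r' % (k, v) for k, v in sorted(kwargs.items()))

# to_string(lr=0.1, t_max=50) -> "lr=0.1, t_max=50"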
def parse(self, media_data_obj, options_manager_obj, operation_type='real'):
    """Called by downloads.DownloadWorker.prepare_download() and
    mainwin.MainWin.update_textbuffer().

    Converts the download options stored in the specified
    options.OptionsManager object into a list of youtube-dl command line
    options.

    Args:

        media_data_obj (media.Video, media.Channel, media.Playlist,
            media.Folder): The media data object being downloaded

        options_manager_obj (options.OptionsManager): The object containing
            the download options for this media data object

        operation_type (str): 'sim', 'real', 'custom', 'classic' (matching
            possible values of downloads.DownloadManager.operation_type)

    Returns:

        List of strings with all the youtube-dl command line options

    """

    # Force youtube-dl's progress bar to be outputted as separate lines
    options_list = ['--newline']

    # Create a copy of the dictionary...
    copy_dict = options_manager_obj.options_dict.copy()

    # ...then modify various values in the copy. Set the 'save_path' option
    self.build_save_path(media_data_obj, copy_dict, operation_type)
    # Set the 'video_format' and 'all_formats' options
    self.build_video_format(media_data_obj, copy_dict, operation_type)
    # Set the 'min_filesize' and 'max_filesize' options
    self.build_file_sizes(copy_dict)
    # Set the 'limit_rate' option
    self.build_limit_rate(copy_dict)

    # Parse basic youtube-dl command line options
    for option_holder_obj in self.option_holder_list:

        # First deal with special cases...
        if option_holder_obj.name == 'extract_audio':
            if copy_dict['audio_format'] == '':
                value = copy_dict[option_holder_obj.name]
                if value != option_holder_obj.default_value:
                    options_list.append(option_holder_obj.switch)

        elif option_holder_obj.name == 'audio_format':
            value = copy_dict[option_holder_obj.name]
            if value != option_holder_obj.default_value:
                options_list.append('-x')
                options_list.append(option_holder_obj.switch)
                options_list.append(utils.to_string(value))

                # The '-x' switch must precede the '--audio-quality' switch,
                # if both are used. Therefore, if the current value of the
                # 'audio_quality' option is not the default value ('5'),
                # insert the '--audio-quality' switch into the options list
                # right now
                if copy_dict['audio_quality'] != '5':
                    options_list.append('--audio-quality')
                    options_list.append(
                        utils.to_string(copy_dict['audio_quality']),
                    )

        elif option_holder_obj.name == 'audio_quality':
            # If the '--audio-quality' switch was not added by the code block
            # just above, then follow the standard procedure
            if option_holder_obj.switch not in options_list:
                if option_holder_obj.check_requirements(copy_dict):
                    value = copy_dict[option_holder_obj.name]
                    if value != option_holder_obj.default_value:
                        options_list.append(option_holder_obj.switch)
                        options_list.append(utils.to_string(value))

        elif option_holder_obj.name == 'match_filter' \
                or option_holder_obj.name == 'external_arg_string' \
                or option_holder_obj.name == 'pp_args':
            value = copy_dict[option_holder_obj.name]
            if value != '':
                options_list.append(option_holder_obj.switch)
                options_list.append('"' + utils.to_string(value) + '"')

        elif option_holder_obj.name == 'subs_lang_list':
            # Convert the list to a comma-separated string, that the
            # 'subs_lang' option can use
            lang_list = copy_dict[option_holder_obj.name]
            if lang_list:
                comma = ','
                options_list.append('--sub-lang')
                options_list.append(comma.join(lang_list))

        # For all other options, just check the value is valid
        elif option_holder_obj.check_requirements(copy_dict):
            value = copy_dict[option_holder_obj.name]
            if value != option_holder_obj.default_value:
                options_list.append(option_holder_obj.switch)
                if not option_holder_obj.is_boolean():
                    options_list.append(utils.to_string(value))

    # Parse the 'extra_cmd_string' option, which can contain arguments inside
    # double quotes "..." (arguments that can therefore contain whitespace)
    parsed_list = utils.parse_ytdl_options(copy_dict['extra_cmd_string'])
    for item in parsed_list:
        options_list.append(item)

    # Parse the 'match_title_list' and 'reject_title_list'
    for item in copy_dict['match_title_list']:
        options_list.append('--match-title')
        options_list.append(item)

    for item in copy_dict['reject_title_list']:
        options_list.append('--reject-title')
        options_list.append(item)

    # Parsing complete
    return options_list
    'c1', 'c2', 'c1_n', 'c2_n', 'corrupted?', 'correct'
]
rows = []
for i in range(0, 999):
    msg = 'AAAAAAAAAAAAAaaaaaaaaaaaaaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBBBBb'
    # Add error
    result_msg, original_msg, real_msg = message(msg)
    m, c1, c2 = get_params(result_msg)
    # Calculate new Cs
    c1_n, c2_n = fletcher_checksum(m)
    if to_string(original_msg) == to_string(result_msg):
        correct = True
    else:
        correct = False
    # Make row
    rows.append([
        to_string(original_msg),
        to_string(result_msg),
        to_int(c1),
        to_int(c2),
        to_int(c1_n),
        to_int(c2_n),
        False if (to_int(c1_n) != to_int(c1)
                  and to_int(c2_n) != to_int(c2)) else True,
        correct
    ])
def MyRMSProp(eta, g, epoch=10):
    Log("RMSProp With Learning Rate %.6f Decay Rate:%.4f \n" % (eta, g))
    hidden_dim = 200
    n_vocab = utils.n_vocab
    batch = 50
    parameters = []
    model = 'Models/RMSProp/model_RMSProp_%.6f_%.4f_.pkl' % (eta, g)
    # print(model)
    eta = eta
    decay = 0.9
    inp = edf.Value()
    edf.params = []
    C2V = edf.Param(edf.xavier((n_vocab, hidden_dim)))
    # forget gate
    Wf = edf.Param(edf.xavier((2 * hidden_dim, hidden_dim)))
    bf = edf.Param(np.zeros((hidden_dim)))
    # input gate
    Wi = edf.Param(edf.xavier((2 * hidden_dim, hidden_dim)))
    bi = edf.Param(np.zeros((hidden_dim)))
    # carry cell
    Wc = edf.Param(edf.xavier((2 * hidden_dim, hidden_dim)))
    bc = edf.Param(np.zeros((hidden_dim)))
    # output cell
    Wo = edf.Param(edf.xavier((2 * hidden_dim, hidden_dim)))
    bo = edf.Param(np.zeros((hidden_dim)))
    V = edf.Param(edf.xavier((hidden_dim, n_vocab)))
    parameters.extend([C2V, Wf, bf, Wi, bi, Wc, bc, Wo, bo, V])

    # load the trained model if it exists
    if os.path.exists(model):
        with open(model, 'rb') as f:
            p_value = pickle.load(f)
            idx = 0
            for p in p_value:
                parameters[idx].value = p
                idx += 1

    def LSTMCell(xt, h, c):
        f = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wf), bf))
        i = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wi), bi))
        o = edf.Sigmoid(edf.Add(edf.VDot(edf.ConCat(xt, h), Wo), bo))
        c_hat = edf.Tanh(edf.Add(edf.VDot(edf.ConCat(xt, h), Wc), bc))
        c_next = edf.Add(edf.Mul(f, c), edf.Mul(i, c_hat))
        h_next = edf.Mul(o, edf.Tanh(c_next))
        return h_next, c_next

    def BuildModel():
        edf.components = []
        B = inp.value.shape[0]
        T = inp.value.shape[1]
        h = edf.Value(np.zeros((B, hidden_dim)))
        c = edf.Value(np.zeros((B, hidden_dim)))
        score = []
        for t in range(T - 1):
            wordvec = edf.Embed(edf.Value(inp.value[:, t]), C2V)
            xt = edf.Reshape(wordvec, [-1, hidden_dim])
            h_next, c_next = LSTMCell(xt, h, c)
            p = edf.SoftMax(edf.VDot(h_next, V))
            logloss = edf.Reshape(
                edf.LogLoss(edf.Aref(p, edf.Value(inp.value[:, t + 1]))),
                (B, 1))
            if t == 0:
                loss = logloss
            else:
                loss = edf.ConCat(loss, logloss)
            score.append(p)
            h = h_next
            c = c_next
        masks = np.zeros((B, T - 1), dtype=np.int32)
        masks[inp.value[:, 1:] != 0] = 1
        loss = edf.MeanwithMask(loss, edf.Value(masks))
        return loss, score

    def CalPerp(score):
        prob = [p.value for p in score]
        prob = np.transpose(np.stack(prob, axis=0), (1, 0, 2))
        B = prob.shape[0]
        T = prob.shape[1]
        V = prob.shape[2]
        masks = np.zeros((B, T), dtype=np.int32)
        masks[inp.value[:, 1:] != 0] = 1
        prob = prob.reshape(-1)
        idx = np.int32(inp.value[:, 1:].reshape(-1))
        outer_dim = len(idx)
        inner_dim = len(prob) / outer_dim
        pick = np.int32(np.array(range(outer_dim)) * inner_dim + idx)
        prob = prob[pick].reshape(B, T)
        return -np.sum(np.log(prob[np.nonzero(prob * masks)]))

    def Predict(max_step, prefix):
        edf.components = []
        T = max_step
        h = edf.Value(np.zeros((1, hidden_dim)))
        c = edf.Value(np.zeros((1, hidden_dim)))
        prediction = []
        for t in range(T):
            if t < len(prefix):
                pred = edf.Value(prefix[t])
                prediction.append(pred)
            else:
                prediction.append(pred)
            wordvec = edf.Embed(pred, C2V)
            xt = edf.Reshape(wordvec, [-1, hidden_dim])
            h_next, c_next = LSTMCell(xt, h, c)
            p = edf.SoftMax(edf.VDot(h_next, V))
            pred = edf.ArgMax(p)
            h = h_next
            c = c_next
        edf.Forward()
        idx = [pred.value for pred in prediction]
        stop_idx = utils.to_index('}')
        if stop_idx in idx:
            return idx[0:idx.index(stop_idx) + 1]
        else:
            return idx

    def Eval(data, cnt):
        perp = 0.
        avg_loss = 0.
        test_batches = range(0, len(data), batch)
        test_minbatches = [data[idx:idx + batch] for idx in test_batches]
        for minbatch in test_minbatches:
            x_padded = utils.make_mask(minbatch)
            inp.set(x_padded)
            loss, score = BuildModel()
            edf.Forward()
            avg_loss += loss.value
            perp += CalPerp(score)
        perp = np.exp(perp / cnt)
        avg_loss /= len(test_batches)
        return perp, avg_loss

    ############################################### training loop #####################################################
    batches = range(0, len(train_data), batch)
    minbatches = [train_data[idx:idx + batch] for idx in batches]
    epoch = epoch

    # initial Perplexity and loss
    # perp, loss = Eval(valid_data, vacnt)
    # print("Initial: Perplexity: %0.5f Avg loss = %0.5f" % (perp, loss))
    # best_loss = loss
    # prefix = 'the agreements bring'
    # generation = Predict(400, utils.to_idxs(prefix))
    # print("Initial generated sentence ")
    # print(utils.to_string(generation))

    for ep in range(epoch):
        perm = np.random.permutation(len(minbatches)).tolist()
        stime = time()
        for k in range(len(minbatches)):
            minbatch = minbatches[perm[k]]
            x_padded = utils.make_mask(minbatch)
            inp.set(x_padded)
            loss, score = BuildModel()
            edf.Forward()
            edf.Backward(loss)
            edf.GradClip(10)
            edf.RMSProp(eta, g)
        duration = (time() - stime) / 60.
        perp, loss = Eval(valid_data, vacnt)
        Log("Epoch %d: Perplexity: %0.5f Avg loss = %0.5f [%.3f mins]"
            % (ep, perp, loss, duration))
        if ep == epoch - 1:
            # generate some text given the prefix and trained model
            prefix = 'the agreements bring'
            generation = Predict(400, utils.to_idxs(prefix))
            Log("Epoch %d: generated sentence " % ep)
            Log(utils.to_string(generation))
        # if loss < best_loss:
        # save the model
        best_loss = loss
        f = open(model, 'wb')
        p_value = []
        for p in parameters:
            p_value.append(p.value)
        pickle.dump(p_value, f)
        # save the hyperparameters
        f_hyper = open("HyperParameters.txt", "a")
        f_hyper.write(
            "RMSProp LearningRate: %.6f Decay_Rate: %.4f Epoch: %d "
            "BestLoss: %0.5f Perplexity: %0.5f\n"
            % (eta, g, ep, best_loss, perp))
        if ep == epoch - 1:
            f_hyper.write("\n\n")
        f_hyper.close()
    Log("\n")
def format_string(self, x):
    x = to_string(self.vocab, x)
    return {str(i): [v] for i, v in enumerate(x)}
############################################### training loop #####################################################
np.seterr(all='raise')
batches = range(0, len(train_data), batch)
minbatches = [train_data[idx:idx + batch] for idx in batches]
epoch = 30

# initial Perplexity and loss
perp, loss = Eval(valid_data, vacnt)
print("Initial: Perplexity: %0.5f Avg loss = %0.5f" % (perp, loss))
best_loss = loss
prefix = 'the agreements bring'
generation = Predict(400, utils.to_idxs(prefix))
print("Initial generated sentence ")
print(utils.to_string(generation))

for ep in range(epoch):
    perm = np.random.permutation(len(minbatches)).tolist()
    stime = time()
    for k in range(len(minbatches)):
        minbatch = minbatches[perm[k]]
        x_padded = utils.make_mask(minbatch)
        inp.set(x_padded)
        loss, score = BuildModel()
        edf.Forward()
        edf.Backward(loss)
        edf.GradClip(10)
from error_simulation import to_bitarray, message
from utils import to_string, MOD_BITARRAY, from_int_to_bitarray, to_int, get_params

if __name__ == "__main__":
    while True:
        msg = input("Message: ")
        # Do something
        result_msg, original_msg, real_msg = message(msg)
        m, c1, c2 = get_params(result_msg)
        print("C1: ", to_int(c1))
        print("C2: ", to_int(c2))
        print("MESSAGE: ", to_string(m))
def test_conversion_list_to_string(self):
    self.assertEqual(to_string(["a", "l", "a"]), "ala")
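# A one-line implementation that satisfies the test above, assuming the unit
# under test only needs to join lists of characters; the real to_string may
# handle more input types.
def to_string(chars):
    return ''.join(chars)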
def parse(self, download_item_obj, options_dict):
    """Called by downloads.DownloadWorker.prepare_download().

    Converts the download options stored in the specified
    options.OptionsManager object into a list of youtube-dl command line
    options.

    Args:

        download_item_obj (downloads.DownloadItem): The object handling the
            download

        options_dict (dict): Python dictionary containing download options;
            taken from options.OptionsManager.options_dict

    Returns:

        List of strings with all the youtube-dl command line options

    """

    # Force youtube-dl's progress bar to be outputted as separate lines
    options_list = ['--newline']

    # Create a copy of the dictionary...
    copy_dict = options_dict.copy()

    # ...then modify various values in the copy. Set the 'save_path' option
    self.build_save_path(download_item_obj, copy_dict)
    # Set the 'video_format' option
    self.build_video_format(copy_dict)
    # Set the 'min_filesize' and 'max_filesize' options
    self.build_file_sizes(copy_dict)
    # Set the 'limit_rate' option
    self.build_limit_rate(copy_dict)

    # Reset the 'playlist_start', 'playlist_end' and 'max_downloads' options
    # if we're not downloading a video in a playlist
    if (
        isinstance(download_item_obj.media_data_obj, media.Video)
        and not isinstance(
            download_item_obj.media_data_obj.parent_obj,
            media.Playlist,
        )
    ) or not isinstance(download_item_obj.media_data_obj, media.Playlist):
        copy_dict['playlist_start'] = 1
        copy_dict['playlist_end'] = 0
        copy_dict['max_downloads'] = 0

    # Parse basic youtube-dl command line options
    for option_holder_obj in self.option_holder_list:

        # First deal with special cases...
        if option_holder_obj.name == 'extract_audio':
            if copy_dict['audio_format'] == '':
                value = copy_dict[option_holder_obj.name]
                if value != option_holder_obj.default_value:
                    options_list.append(option_holder_obj.switch)

        elif option_holder_obj.name == 'audio_format':
            value = copy_dict[option_holder_obj.name]
            if value != option_holder_obj.default_value:
                options_list.append('-x')
                options_list.append(option_holder_obj.switch)
                options_list.append(utils.to_string(value))

                # The '-x' switch must precede the '--audio-quality' switch,
                # if both are used. Therefore, if the current value of the
                # 'audio_quality' option is not the default value ('5'),
                # insert the '--audio-quality' switch into the options list
                # right now
                if copy_dict['audio_quality'] != '5':
                    options_list.append('--audio-quality')
                    options_list.append(
                        utils.to_string(copy_dict['audio_quality']),
                    )

        elif option_holder_obj.name == 'audio_quality':
            # If the '--audio-quality' switch was not added by the code block
            # just above, then follow the standard procedure
            if option_holder_obj.switch not in options_list:
                if option_holder_obj.check_requirements(copy_dict):
                    value = copy_dict[option_holder_obj.name]
                    if value != option_holder_obj.default_value:
                        options_list.append(option_holder_obj.switch)
                        options_list.append(utils.to_string(value))

        elif option_holder_obj.name == 'match_filter' \
                or option_holder_obj.name == 'external_arg_string' \
                or option_holder_obj.name == 'pp_args':
            value = copy_dict[option_holder_obj.name]
            if value != '':
                options_list.append(option_holder_obj.switch)
                options_list.append('"' + utils.to_string(value) + '"')

        # For all other options, just check the value is valid
        elif option_holder_obj.check_requirements(copy_dict):
            value = copy_dict[option_holder_obj.name]
            if value != option_holder_obj.default_value:
                options_list.append(option_holder_obj.switch)
                if not option_holder_obj.is_boolean():
                    options_list.append(utils.to_string(value))

    # Parse the 'extra_cmd_string' option, which can contain arguments inside
    # double quotes "..." (arguments that can therefore contain whitespace)

    # Set a flag for an item beginning with double quotes, and reset it for
    # an item ending in double quotes
    quote_flag = False
    # Temporary list to hold such quoted arguments
    quote_list = []

    for item in copy_dict['extra_cmd_string'].split():
        quote_flag = (quote_flag or item[0] == "\"")
        if quote_flag:
            quote_list.append(item)
        else:
            options_list.append(item)

        if quote_flag and item[-1] == "\"":
            # Special case mode is over. Append our special items to the
            # options list
            options_list.append(" ".join(quote_list)[1:-1])
            quote_flag = False
            quote_list = []

    # Parse the 'match_title_list' and 'reject_title_list'
    for item in copy_dict['match_title_list']:
        options_list.append('--match-title')
        options_list.append(item)

    for item in copy_dict['reject_title_list']:
        options_list.append('--reject-title')
        options_list.append(item)

    # Parsing complete
    return options_list
def hamming_recieve(data):
    print("MESSAGE: ", to_string(decode(data)))