def get(self, user):
    """Get all institutions."""
    INSTITUTION_ATTRIBUTES = [
        'name', 'key', 'acronym', 'address', 'photo_url', 'description',
        'admin', 'cover_photo', 'institutional_email'
    ]
    ACTIVE_STATE = "active"

    page = to_int(
        self.request.get('page', Utils.DEFAULT_PAGINATION_OFFSET),
        QueryException,
        "Query param page must be an integer")
    limit = to_int(
        self.request.get('limit', Utils.DEFAULT_PAGINATION_LIMIT),
        QueryException,
        "Query param limit must be an integer")

    queryInstitutions = Institution.query(
        Institution.state == ACTIVE_STATE)
    queryInstitutions, more = offset_pagination(
        page, limit, queryInstitutions)

    array = [
        institution.make(INSTITUTION_ATTRIBUTES)
        for institution in queryInstitutions
    ]

    data = {'institutions': array, 'next': more}
    self.response.write(json.dumps(data))
def clip_img(img, x_factor, y_factor):
    """
    Clip image by the given x and y factors.

    :param img: image which needs to be clipped
    :param x_factor: x value defining the degree of horizontal clipping
    :param y_factor: y value defining the degree of vertical clipping
    :return: img: clipped image
    """
    img_height, img_width = img.shape[:2]

    # set image slice indices
    x_thresh_left, y_thresh_top = 0, 0
    x_thresh_right, y_thresh_bottom = img_width, img_height

    # process x clipping
    if x_factor < 0.0:
        # image gets clipped on the left side
        logging.info(" => Clip image on left half by %s%%" % (abs(x_factor) * 100))
        # divide by 2 because x_factor only gets applied on the right or left half of the image
        x_thresh_left = utils.to_int(img_width * abs(x_factor / 2))
    elif x_factor > 0.0:
        # image gets clipped on the right side
        logging.info(" => Clip image on right half by %s%%" % (x_factor * 100))
        x_thresh_right -= utils.to_int(img_width * abs(x_factor / 2))

    # process y clipping
    if y_factor < 0.0:
        # image gets clipped on the upper side
        logging.info(" => Clip image on upper half by %s%%" % (abs(y_factor) * 100))
        # divide by 2 because y_factor only gets applied on the upper or lower half of the image
        y_thresh_top = utils.to_int(img_height * abs(y_factor / 2))
    elif y_factor > 0.0:
        # image gets clipped on the lower side
        logging.info(" => Clip image on lower half by %s%%" % (y_factor * 100))
        y_thresh_bottom -= utils.to_int(img_height * abs(y_factor / 2))

    return img[y_thresh_top:y_thresh_bottom, x_thresh_left:x_thresh_right, :]
def change_saturation(img, saturation=0):
    img = np.array(img, dtype=np.uint8)  # convert image to uint8 numpy array to avoid cv2 depth errors
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    # map the 0..100 slider value to a signed offset in -255..255
    saturation = utils.to_int(saturation)
    saturation = utils.to_int((saturation - 50) * 5.1)

    if saturation > 0:
        # increase saturation
        lim = 255 - saturation
        s[s > lim] = 255
        s[s <= lim] += saturation
    else:
        # decrease saturation
        lim = abs(saturation)
        s[s > lim] -= abs(saturation)
        s[s <= lim] = 0

    final_hsv = cv2.merge((h, s, v))
    img_converted = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)

    # add alpha channel if existent
    if img.shape[2] > 3:
        img_converted = cv2.merge((img_converted, img[:, :, 3]))

    return img_converted
def get(self, user):
    """Handler of get posts."""
    page = to_int(
        self.request.get('page', Utils.DEFAULT_PAGINATION_OFFSET),
        QueryException,
        "Query param page must be an integer")
    limit = to_int(
        self.request.get('limit', Utils.DEFAULT_PAGINATION_LIMIT),
        QueryException,
        "Query param limit must be an integer")

    array = []
    visible_posts = []
    more = False
    if len(user.follows) > 0:
        queryPosts = Post.query(Post.institution.IN(
            user.follows)).order(-Post.last_modified_date, Post.key)
        queryPosts, more = offset_pagination(
            page, limit, queryPosts)
        array = [post.make(self.request.host) for post in queryPosts]
        visible_posts = [post for post in array if not Post.is_hidden(post)]

    data = {
        'posts': visible_posts,
        'next': more
    }

    self.response.write(json.dumps(data))
def change_value(img, value=0):
    img = np.array(img, dtype=np.uint8)  # convert image to uint8 numpy array to avoid cv2 depth errors
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    # map the 0..100 slider value to a signed offset in -100..100
    value = utils.to_int(value)
    value = utils.to_int((value - 50) * 2)

    if value > 0:
        # increase value
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
    else:
        # decrease value
        lim = abs(value)
        v[v > lim] -= abs(value)
        v[v <= lim] = 0

    final_hsv = cv2.merge((h, s, v))
    img_converted = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)

    # add alpha channel if existent
    if img.shape[2] > 3:
        img_converted = cv2.merge((img_converted, img[:, :, 3]))

    return img_converted
def update(self, user_agent):
    if isinstance(user_agent or 0, str) and len(user_agent):
        self.ua_data = user_agent_parser.Parse(user_agent)
        self.ua_data['user_agent'] = {k: to_int(v) for k, v in self.ua_data['user_agent'].items()}
        self.ua_data['os'] = {k: to_int(v) for k, v in self.ua_data['os'].items()}
        self.os = OperatingSystem(**self.ua_data['os'])
        self.browser = Browser(**self.ua_data['user_agent'])
        self.device = Device(**self.ua_data['device'])
def is_max(self):
    p = self.current
    n = to_int(p)
    for i in range(3):
        p = rotate90(p)
        for q in permutations(range(1, 4)):
            if len(p) == len(self.current) and to_int(replace(p, q)) > n:
                return False
    return True
def adjust_cropped_joint_position(self, joint, size):
    r = 100 * self.scale
    y = joint[0] - (self.y - to_int(r))
    x = joint[1] - (self.x - to_int(r))
    w = size / (2 * r)
    y = to_int(w * y)
    x = to_int(w * x)
    return [y, x]
def validate_input_params(self, input_params):
    revolution_period = utils.to_int(input_params['revolution_period'])
    if revolution_period <= 0:
        raise ModelError('Период обращения должен быть больше 0.')
    orbital_inclination = utils.to_int(input_params['orbital_inclination'])
    if not 0 < orbital_inclination < 90:
        raise ModelError(
            'Наклонение орбиты должно лежать в пределах [0;90].')
    return {
        'revolution_period': revolution_period,
        'orbital_inclination': orbital_inclination}
def fletcher_checksum(block):
    c1 = 0
    c2 = 0
    for bite in block:
        c1 += bite
        c2 += c1
    c1 = c1 % to_int(MOD_BITARRAY)
    c2 = c2 % to_int(MOD_BITARRAY)
    c1_bitarray = from_int_to_bitarray(c1, MOD_BITARRAY.length())
    c2_bitarray = from_int_to_bitarray(c2, MOD_BITARRAY.length())
    return (c1_bitarray, c2_bitarray)
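# For comparison, a minimal byte-oriented sketch of the same running-sum idea
# (standard Fletcher-16 with modulus 255). It is illustrative only: the function
# above instead reduces modulo to_int(MOD_BITARRAY) and returns bitarrays via the
# module's from_int_to_bitarray helper.
def fletcher16(data):
    c1 = c2 = 0
    for byte in bytearray(data):
        c1 = (c1 + byte) % 255   # simple running sum
        c2 = (c2 + c1) % 255     # sum of the running sums
    return (c2 << 8) | c1

print(hex(fletcher16(b"abcde")))  # 0xc8f0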
def query_notes():
    query = request.json or {}
    page = utils.to_int(request.args.get('page'), 1)
    if page < 1:
        page = 1
    size = utils.to_int(request.args.get('size'), 20)
    if size < 1:
        size = 20
    visitor = utils.get_visitor()
    if not visitor.is_me:
        # filter out hidden notes
        query.update({'$and': [dict(query), {'tags': {'$not': {'$eq': '.'}}}]})
    return Note.query(query, page=page, size=size)
def validate_components(self, components, input_params):
    clean_components = {}
    if not components['critical_errors_control'] == 'True':
        raise ModelError('Ваша система должна включать модуль '
                         'контроля критических помех.')
    clean_components['security_quality'] = utils.to_int(
        components['security_quality'])
    clean_components['capacitor_quality'] = utils.to_int(
        components['capacitor_quality'])
    clean_components['right_capacitor'] = (
        CAPACITORS[input_params['device_class']] == components['power_capacity'])
    return clean_components
def verify(curve, public_key, message, signature):
    r, s = signature
    if len(to_bytes(s, SIZE) + to_bytes(r, SIZE)) != SIZE * 2:
        raise ValueError("Invalid signature length")
    q = curve.q
    p = curve.p
    if r <= 0 or r >= q or s <= 0 or s >= q:
        return False
    # e is alpha, the integer corresponding to the hash of the received message
    e = to_int(message) % curve.q
    if e == 0:
        e = 1
    v = modular_invert(e, q)
    z1 = s * v % q
    z2 = q - r * v % q
    # compute the elliptic-curve point C = z1 * P + z2 * Q
    p1x, p1y = curve.exp(z1)
    q1x, q1y = curve.exp(z2, public_key[0], public_key[1])
    cx = q1x - p1x
    if cx < 0:
        cx += p
    cx = modular_invert(cx, p)
    z1 = q1y - p1y
    cx = cx * z1 % p
    cx = cx * cx % p
    cx = cx - p1x - q1x
    cx = cx % p
    if cx < 0:
        cx += p
    cx %= q
    return cx == r
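# For orientation (a summary of the code above, not text from its module): this is the
# standard GOST R 34.10 verification sequence, and the manual arithmetic on cx is the
# affine chord-addition formula that yields the x-coordinate of C = z1*P + z2*Q:
#   e = alpha mod q (set e = 1 if e = 0),   v = e^{-1} mod q,
#   z1 = s*v mod q,   z2 = -r*v mod q,
#   lambda = (y_Q1 - y_P1) / (x_Q1 - x_P1) mod p,   x_C = lambda^2 - x_P1 - x_Q1 mod p,
# and the signature is accepted iff x_C mod q == r.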
def validate_components(self, components):
    if not components['critical_errors_control'] == 'True':
        raise ModelError('Ваша система должна включать модуль '
                         'контроля критических помех.')
    security_quality = utils.to_int(components['security_quality'])
    return {'security_quality': security_quality}
def parse_info(self, records):
    if self.join_ts is None:
        if 'time' in records:
            self.join_ts = calendar.timegm(datetime.utcfromtimestamp(time.time()).utctimetuple()) - \
                to_int(records['time']) * 1000
        elif 'time_local' in records:
            self.join_ts = self.parse_time(records['time_local'])
        else:
            self.join_ts = calendar.timegm(datetime.utcfromtimestamp(time.time()).utctimetuple())
    if 'status' in records:
        status = to_int(records['status'])
        self.status = status
    if 'http_user_agent' in records:
        self.detail = records['http_user_agent']
def format_transactions(self, transaction_items):
    if isinstance(transaction_items, list):
        [self.format_transactions(t) for t in transaction_items]
    else:
        status = to_int(transaction_items.get('Status'))
        data = {
            'braspag_transaction_id': transaction_items.get('BraspagTransactionId'),
            'acquirer_transaction_id': transaction_items.get('AcquirerTransactionId'),
            'authorization_code': transaction_items.get('AuthorizationCode'),
            'amount': to_int(transaction_items.get('Amount')),
            'status': status,
            'status_message': self.STATUS[status],
            'proof_of_sale': transaction_items.get('ProofOfSale'),
        }
        if 'MaskedCreditCardNumber' in transaction_items:
            data['masked_credit_card_number'] = transaction_items.get('MaskedCreditCardNumber')
        if 'ReturnCode' in transaction_items:
            data['return_code'] = transaction_items.get('ReturnCode')
        if 'ReturnMessage' in transaction_items:
            data['return_message'] = transaction_items.get('ReturnMessage')
        if 'PaymentMethod' in transaction_items:
            data['payment_method'] = to_int(transaction_items.get('PaymentMethod'))
        if 'CreditCardToken' in transaction_items:
            data['card_token'] = transaction_items.get('CreditCardToken')
        if 'PaymentMethodName' in transaction_items:
            data['payment_method_name'] = transaction_items.get('PaymentMethodName')
        if 'TransactionType' in transaction_items:
            data['transaction_type'] = to_int(transaction_items.get('TransactionType'))
        if 'ReceivedDate' in transaction_items:
            data['received_date'] = to_date(transaction_items.get('ReceivedDate'))
        if 'CapturedDate' in transaction_items:
            data['captured_date'] = to_date(transaction_items.get('CapturedDate'))
        if 'OrderId' in transaction_items:
            data['order_id'] = transaction_items.get('OrderId')
        self.transactions.append(data)
def crop(self, img, size):
    r = to_int(100 * self.scale)
    cropped = slice_pad(img, self.y - r, self.y + r, self.x - r, self.x + r)
    resized = imresize(cropped, (size, size))
    return resized
def get_port(self):
    value = self.get_argument('port', u'')
    if not value:
        return DEFAULT_PORT
    port = to_int(value)
    if port is None or not is_valid_port(port):
        raise InvalidValueError('Invalid port: {}'.format(value))
    return port
def change_bcg(img, brightness=0, contrast=0, gamma=0.0):
    """
    Change brightness, contrast and gamma of the given image.

    :param img: image whose BCG values should be changed
    :param brightness: new value for brightness
    :param contrast: new value for contrast
    :param gamma: new value for gamma
    :return: img_converted: result image with changed BCG values
    """
    brightness = utils.to_int(brightness)
    contrast = utils.to_int(contrast)
    gamma = utils.to_float(gamma)

    # split BGR image to separate arrays
    if img.shape[2] > 3:
        b_img, g_img, r_img, alpha = cv2.split(img)
    else:
        b_img, g_img, r_img = cv2.split(img)

    # manipulate each channel
    if brightness != 0:
        b_img = cv2.LUT(b_img, brightness_conversion_lut(brightness))
        g_img = cv2.LUT(g_img, brightness_conversion_lut(brightness))
        r_img = cv2.LUT(r_img, brightness_conversion_lut(brightness))
    if contrast != 0:
        b_img = cv2.LUT(b_img, contrast_conversion_lut(utils.to_float(contrast + 100) / 100, 100))
        g_img = cv2.LUT(g_img, contrast_conversion_lut(utils.to_float(contrast + 100) / 100, 100))
        r_img = cv2.LUT(r_img, contrast_conversion_lut(utils.to_float(contrast + 100) / 100, 100))
    if gamma != 0.0:
        b_img = cv2.LUT(b_img, gamma_conversion_lut(gamma * 2))
        g_img = cv2.LUT(g_img, gamma_conversion_lut(gamma * 2))
        r_img = cv2.LUT(r_img, gamma_conversion_lut(gamma * 2))

    # merge channels to colored picture
    img_converted = merge_bgr_channels(b_img, g_img, r_img)

    # add alpha channel if existent
    if img.shape[2] > 3:
        img_converted = cv2.merge((img_converted, img[:, :, 3]))

    return img_converted
def apply_resolution_filter(resolution_filter):
    """
    Apply resolution filter with given resolution_filter object parameters.

    :param resolution_filter: object containing resolution parameters
    :return: res_x, res_y: x and y resolution in pixel
    """
    if resolution_filter.x_min == resolution_filter.x_max:
        res_x = utils.to_int(resolution_filter.x_min)
    else:
        res_x = random.randint(resolution_filter.x_min, resolution_filter.x_max)
    if resolution_filter.y_min == resolution_filter.y_max:
        res_y = utils.to_int(resolution_filter.y_min)
    else:
        res_y = random.randint(resolution_filter.y_min, resolution_filter.y_max)
    logging.info(" => resolution x: %s and y: %s" % (res_x, res_y))
    return res_x, res_y
def load_test_result(test_result_file):
    l = []
    with open(test_result_file) as file:
        lines = csv.reader(file)
        for line in lines:
            l.append(line)
    l.remove(l[0])
    label = array(l)
    return to_int(label[:, 1])
def suntilxor(self, *arg):
    """
    Execute nexti until jmp-cmds
    Usage:
        MYNAME depth=1
    """
    (depth, ) = utils.normalize_argv(arg, 1)
    depth = utils.to_int(depth)
    if depth is None:
        depth = 1
    c.suntil("xor", depth)
def load_train_data(train_file_path):
    tmp_lines = []
    log("Start load file")
    with open(train_file_path, "r") as f:
        lines = csv.reader(f)
        for line in lines:
            tmp_lines.append(line)
    log("Finish load file")
    tmp_lines.remove(tmp_lines[0])  # drop the header row
    log("Start trans array")
    tmp_array = array(tmp_lines)
    log("Finish trans array")
    label = tmp_array[:, 0]
    data = tmp_array[:, 1:]
    return normalizing(to_int(data)), to_int(label)
def calc_context_indep(acts, comms, n_acts, n_comm):
    # Calculates the context independence (Bogin et al., 2018)
    comms = [U.to_int(m) for m in comms]
    acts = [U.to_int(a) for a in acts]
    eps = 1e-9

    p_a = U.probs_from_counts(acts, n_acts, eps=eps)
    p_c = U.probs_from_counts(comms, n_comm, eps=eps)
    p_ac = U.bin_acts(comms, acts, n_comm, n_acts)
    p_ac /= np.sum(p_ac)

    p_a_c = np.divide(p_ac, np.reshape(p_c, (-1, 1)))  # p(a | c)
    p_c_a = np.divide(p_ac, np.reshape(p_a, (1, -1)))  # p(c | a)

    ca = np.argmax(p_a_c, axis=0)  # for each action, the message that maximizes p(a | c)
    ci = 0
    for a in range(n_acts):
        ci += p_a_c[ca[a]][a] * p_c_a[ca[a]][a]
    ci /= n_acts
    return ci
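# In symbols, the value returned above is
#   CI = (1/|A|) * sum_a p(a | c_a) * p(c_a | a),   with c_a = argmax_c p(a | c),
# where all probabilities are estimated from co-occurrence counts (this restates the
# code; see Bogin et al., 2018 for the original definition).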
def calc_entropy(comms, n_comm):
    # Calculates the entropy of the communication distribution
    # p(c) is calculated by averaging over episodes
    comms = [U.to_int(m) for m in comms]
    eps = 1e-9

    p_c = U.probs_from_counts(comms, n_comm, eps=eps)
    entropy = 0
    for c in range(n_comm):
        entropy += -p_c[c] * math.log(p_c[c])
    return entropy
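# In symbols, the loop computes H(C) = -sum_c p(c) * log p(c), with p(c) estimated from
# message counts; math.log is the natural logarithm, so the entropy is reported in nats.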
def infox(self, *arg):
    """
    Customized xinfo command from https://github.com/longld/peda
    Usage:
        MYNAME address
        MYNAME register [reg1 reg2]
    """
    (address, regname) = utils.normalize_argv(arg, 2)
    if address is None:
        self._missing_argument()

    text = ""
    #if not self._is_running():
    if False:
        return

    def get_reg_text(r, v):
        text = green("%s" % r.upper().ljust(3), "bold") + ": "
        chain = e.examine_mem_reference(v)
        text += utils.format_reference_chain(chain)
        text += "\n"
        return text

    (arch, bits) = e.getarch()
    if str(address).startswith("r"):
        # Register
        regs = e.getregs(" ".join(arg[1:]))
        if regname is None:
            for r in REGISTERS[bits]:
                if r in regs:
                    text += get_reg_text(r, regs[r])
        else:
            for (r, v) in sorted(regs.items()):
                text += get_reg_text(r, v)
        if text:
            utils.msg(text.strip())
        if regname is None or "eflags" in regname:
            self.eflags()
        return
    elif utils.to_int(address) is None:
        utils.warning_msg("not a register nor an address")
    else:
        # Address
        chain = e.examine_mem_reference(address)
        #text += '\n'
        #text += 'info: '
        text += utils.format_reference_chain(chain)  # + "\n"
        vmrange = e.get_vmrange(address)
        if vmrange:
            (start, end, perm, name) = vmrange
        utils.msg(text)
    return
def load_test_data(test_file):
    load_list = []
    log("Start load test data")
    with open(test_file, "r") as f:
        lines = csv.reader(f)
        for line in lines:
            load_list.append(line)
    load_list.remove(load_list[0])
    data = array(load_list)
    return normalizing(to_int(data))
def stepcalluntil(self, *arg):
    """
    Execute stepcall until regex
    Usage:
        MYNAME regex depth=1
    """
    (regex, depth) = utils.normalize_argv(arg, 2)
    regex = str(regex)
    depth = utils.to_int(depth)
    if depth is None:
        depth = 1
    c.suntil(regex, depth, True)
def calc_mutinfo(acts, comms, n_acts, n_comm):
    # Calculate mutual information between actions and messages
    # Joint probability p(a, c) is calculated by counting co-occurrences, *not* by performing interventions
    # If the actions and messages come from the same agent, then this is the speaker consistency (SC)
    # If the actions and messages come from different agents, this is the instantaneous coordination (IC)
    comms = [U.to_int(m) for m in comms]
    acts = [U.to_int(a) for a in acts]

    # Calculate probabilities by counting co-occurrences
    p_a = U.probs_from_counts(acts, n_acts)
    p_c = U.probs_from_counts(comms, n_comm)
    p_ac = U.bin_acts(comms, acts, n_comm, n_acts)
    p_ac /= np.sum(p_ac)  # normalize counts into a probability distribution

    # Calculate mutual information
    mutinfo = 0
    for c in range(n_comm):
        for a in range(n_acts):
            if p_ac[c][a] > 0:
                mutinfo += p_ac[c][a] * math.log(p_ac[c][a] / (p_c[c] * p_a[a]))
    return mutinfo
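# A minimal self-contained sketch of the same estimate,
#   I(A;C) = sum_{c,a} p(c,a) * log( p(c,a) / (p(c) * p(a)) ),
# computed directly from (message, action) index pairs without the U helper module.
# The function name and inputs below are illustrative, not part of the codebase above.
import math
import numpy as np

def mutual_info_from_pairs(pairs, n_acts, n_comm):
    p_ac = np.zeros((n_comm, n_acts))
    for c, a in pairs:
        p_ac[c][a] += 1            # co-occurrence counts
    p_ac /= p_ac.sum()             # joint distribution p(c, a)
    p_c = p_ac.sum(axis=1)         # marginal p(c)
    p_a = p_ac.sum(axis=0)         # marginal p(a)
    mi = 0.0
    for c in range(n_comm):
        for a in range(n_acts):
            if p_ac[c][a] > 0:
                mi += p_ac[c][a] * math.log(p_ac[c][a] / (p_c[c] * p_a[a]))
    return mi

print(mutual_info_from_pairs([(0, 0), (0, 0), (1, 1), (1, 1)], n_acts=2, n_comm=2))  # ~log(2)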
def get(self, user, url_string):
    """Handler of get posts."""
    page = to_int(
        self.request.get('page', Utils.DEFAULT_PAGINATION_OFFSET),
        QueryException,
        "Query param page must be an integer")
    limit = to_int(
        self.request.get('limit', Utils.DEFAULT_PAGINATION_LIMIT),
        QueryException,
        "Query param limit must be an integer")

    institution_key = ndb.Key(urlsafe=url_string)

    queryPosts = Post.query(Post.institution == institution_key).order(
        -Post.last_modified_date)
    queryPosts, more = offset_pagination(page, limit, queryPosts)

    formated_posts = [post.make(self.request.host) for post in queryPosts]
    visible_posts = [
        post for post in formated_posts if not Post.is_hidden(post)
    ]

    data = {'posts': visible_posts, 'next': more}
    self.response.write(json.dumps(data))
def parse_info(self, records):
    if 'remote_addr' not in records:
        return
    client = records['remote_addr']
    client_info = None
    if client not in self.clients:
        client_info = ClientInfo(client)
        client_info.parse_info(records)
        self.clients[client] = client_info
    else:
        client_info = self.clients[client]
        client_info.parse_info(records)
    if self.start_ts == 0 or self.start_ts > client_info.join_ts:
        self.start_ts = client_info.join_ts
    duration = calendar.timegm(datetime.utcfromtimestamp(time.time()).utctimetuple()) - self.clients[client].join_ts
    if 'in_bytes' in records:
        self.in_bytes += to_int(records['in_bytes'])
    if 'in_bw' in records:
        self.in_bw = to_int(records['in_bw'])
    if 'out_bytes' in records:
        self.out_bytes += to_int(records['out_bytes'])
    elif 'bytes_sent' in records:
        self.out_bytes += to_int(records['body_bytes_sent'])
    if 'out_bw' in records:
        self.out_bw = to_int(records['out_bw'])
    else:
        if duration > 0:
            self.out_bw = self.out_bytes / duration * 1000 / 1024.0
        else:
            self.out_bw = self.out_bytes / 1024.0
def get_cells(self):
    s = self.send(['show cell 1'])
    lc = [l.split('|') for l in s.split('\n')]
    lc = [l for l in lc if l.__class__.__name__ == 'list' and len(l) == 10]
    lc = [[to_int(e) for e in l] for l in lc][1:]
    # Keep only resolved antennas
    lc = [(l[:5], geoloc(*l[1:5])) for l in lc if None not in l[:5]]
    # Geolocation of antennas
    lc = [(a, b) for a, b in lc if b]
    # Keep only geolocated antennas
    for a, b in lc:
        arfcn, mcc, mnc, lac, cid = a
        lat, lon = b
        mcc2, mnc2, country, network = imsi2mccmnc("%d%02d000000" % (mcc, mnc))
        try:
            self.mydbcur.execute(
                '''INSERT INTO antennas
                   VALUES (strftime('%Y-%m-%d %H:%M:%S'),?,?,?,?,?,?,?,?,?)''',
                (arfcn, mcc, mnc, lac, cid, country, network, lat, lon))
        except sqlite3.Error as msg:
            pass
def validate_model_params(self, model_params):
    orbit_radius = utils.to_float(model_params['orbit_radius'])
    if orbit_radius <= 0:
        raise ModelError('Проектный радиус орбиты должен быть больше 0.')
    pos_crit_accel = utils.to_float(model_params['pos_crit_accel'])
    if pos_crit_accel <= 0:
        raise ModelError(
            'Критическое нормальное положительное ускорение '
            'должно быть больше 0.')
    neg_crit_accel = utils.to_float(model_params['neg_crit_accel'])
    if neg_crit_accel <= 0:
        raise ModelError(
            'Критическое нормальное отрицательное ускорение '
            'должно быть больше 0.')
    permissible_variation = utils.to_int(model_params['permissible_variation'])
    return {
        'orbit_radius': orbit_radius,
        'pos_crit_accel': pos_crit_accel,
        'neg_crit_accel': neg_crit_accel,
        'permissible_variation': permissible_variation}
def __init__(self, l_in, n_layers, pheight, pwidth, dim_proj, nclasses,
             stack_sublayers,
             # outsampling
             out_upsampling_type, out_nfilters, out_filters_size,
             out_filters_stride,
             out_W_init=lasagne.init.GlorotUniform(),
             out_b_init=lasagne.init.Constant(0.),
             out_nonlinearity=lasagne.nonlinearities.identity,
             hypotetical_fm_size=np.array((100.0, 100.0)),
             # input ConvLayers
             in_nfilters=None,
             in_filters_size=((3, 3), (3, 3)),
             in_filters_stride=((1, 1), (1, 1)),
             in_W_init=lasagne.init.GlorotUniform(),
             in_b_init=lasagne.init.Constant(0.),
             in_nonlinearity=lasagne.nonlinearities.rectify,
             in_vgg_layer='conv3_3',
             # common recurrent layer params
             RecurrentNet=lasagne.layers.GRULayer,
             nonlinearity=lasagne.nonlinearities.rectify,
             hid_init=lasagne.init.Constant(0.),
             grad_clipping=0,
             precompute_input=True,
             mask_input=None,
             # 1x1 Conv layer for dimensional reduction
             conv_dim_red=False,
             conv_dim_red_nonlinearity=lasagne.nonlinearities.identity,
             # GRU specific params
             gru_resetgate=lasagne.layers.Gate(W_cell=None),
             gru_updategate=lasagne.layers.Gate(W_cell=None),
             gru_hidden_update=lasagne.layers.Gate(
                 W_cell=None, nonlinearity=lasagne.nonlinearities.tanh),
             gru_hid_init=lasagne.init.Constant(0.),
             # LSTM specific params
             lstm_ingate=lasagne.layers.Gate(),
             lstm_forgetgate=lasagne.layers.Gate(),
             lstm_cell=lasagne.layers.Gate(
                 W_cell=None, nonlinearity=lasagne.nonlinearities.tanh),
             lstm_outgate=lasagne.layers.Gate(),
             # RNN specific params
             rnn_W_in_to_hid=lasagne.init.Uniform(),
             rnn_W_hid_to_hid=lasagne.init.Uniform(),
             rnn_b=lasagne.init.Constant(0.),
             # Special layers
             batch_norm=False,
             name=''):
    """A ReSeg layer

    The ReSeg layer is composed of multiple ReNet layers and an
    upsampling layer.

    Parameters
    ----------
    l_in : lasagne.layers.Layer
        The input layer, in bc01 format
    n_layers : int
        The number of layers
    pheight : tuple
        The height of the patches, for each layer
    pwidth : tuple
        The width of the patches, for each layer
    dim_proj : tuple
        The number of hidden units of each RNN, for each layer
    nclasses : int
        The number of classes of the data
    stack_sublayers : bool
        If True the bidirectional RNNs in the ReNet layers will be
        stacked one over the other. See ReNet for more details.
    out_upsampling_type : string
        The kind of upsampling to be used
    out_nfilters : int
        The number of hidden units of the upsampling layer
    out_filters_size : tuple
        The size of the upsampling filters, if any
    out_filters_stride : tuple
        The stride of the upsampling filters, if any
    out_W_init : Theano shared variable, numpy array or callable
        Initializer for W
    out_b_init : Theano shared variable, numpy array or callable
        Initializer for b
    out_nonlinearity : Theano shared variable, numpy array or callable
        The nonlinearity to be applied after the upsampling
    hypotetical_fm_size : float
        The hypothetical size of the feature map that would be input
        of the layer if the input image of the whole network was of
        size (100, 100)
    RecurrentNet : lasagne.layers.Layer
        A recurrent layer class
    nonlinearity : callable or None
        The nonlinearity that is applied to the output. If None is
        provided, no nonlinearity will be applied.
    hid_init : callable, np.ndarray, theano.shared or lasagne.layers.Layer
        Initializer for initial hidden state
    grad_clipping : float
        If nonzero, the gradient messages are clipped to the given value
        during the backward pass.
    precompute_input : bool
        If True, precompute input_to_hid before iterating through the
        sequence. This can result in a speedup at the expense of an
        increase in memory usage.
    mask_input : lasagne.layers.Layer
        Layer which allows for a sequence mask to be input, for when
        sequences are of variable length. Default None, which means no
        mask will be supplied (i.e. all sequences are of the same
        length).
    gru_resetgate : lasagne.layers.Gate
        Parameters for the reset gate, if RecurrentNet is GRU
    gru_updategate : lasagne.layers.Gate
        Parameters for the update gate, if RecurrentNet is GRU
    gru_hidden_update : lasagne.layers.Gate
        Parameters for the hidden update, if RecurrentNet is GRU
    gru_hid_init : callable, np.ndarray, theano.shared or lasagne.layers.Layer
        Initializer for initial hidden state, if RecurrentNet is GRU
    lstm_ingate : lasagne.layers.Gate
        Parameters for the input gate, if RecurrentNet is LSTM
    lstm_forgetgate : lasagne.layers.Gate
        Parameters for the forget gate, if RecurrentNet is LSTM
    lstm_cell : lasagne.layers.Gate
        Parameters for the cell computation, if RecurrentNet is LSTM
    lstm_outgate : lasagne.layers.Gate
        Parameters for the output gate, if RecurrentNet is LSTM
    rnn_W_in_to_hid : Theano shared variable, numpy array or callable
        Initializer for input-to-hidden weight matrix, if RecurrentNet is
        RecurrentLayer
    rnn_W_hid_to_hid : Theano shared variable, numpy array or callable
        Initializer for hidden-to-hidden weight matrix, if RecurrentNet is
        RecurrentLayer
    rnn_b : Theano shared variable, numpy array, callable or None
        Initializer for bias vector, if RecurrentNet is RecurrentLayer.
        If None is provided there will be no bias
    batch_norm : bool
        This adds a batch normalization layer at the end of the network,
        right after each Gradient Upsampling layer
    name : string
        The name of the layer, optional
    """
    super(ReSegLayer, self).__init__(l_in, name)
    self.l_in = l_in
    self.n_layers = n_layers
    self.pheight = pheight
    self.pwidth = pwidth
    self.dim_proj = dim_proj
    self.nclasses = nclasses
    self.stack_sublayers = stack_sublayers
    # upsampling
    self.out_upsampling_type = out_upsampling_type
    self.out_nfilters = out_nfilters
    self.out_filters_size = out_filters_size
    self.out_filters_stride = out_filters_stride
    self.out_W_init = out_W_init
    self.out_b_init = out_b_init
    self.out_nonlinearity = out_nonlinearity
    self.hypotetical_fm_size = hypotetical_fm_size
    # input ConvLayers
    self.in_nfilters = in_nfilters
    self.in_filters_size = in_filters_size
    self.in_filters_stride = in_filters_stride
    self.in_W_init = in_W_init
    self.in_b_init = in_b_init
    self.in_nonlinearity = in_nonlinearity
    self.in_vgg_layer = in_vgg_layer
    # common recurrent layer params
    self.RecurrentNet = RecurrentNet
    self.nonlinearity = nonlinearity
    self.hid_init = hid_init
    self.grad_clipping = grad_clipping
    self.precompute_input = precompute_input
    self.mask_input = mask_input
    # GRU specific params
    self.gru_resetgate = gru_resetgate
    self.gru_updategate = gru_updategate
    self.gru_hidden_update = gru_hidden_update
    self.gru_hid_init = gru_hid_init
    # LSTM specific params
    self.lstm_ingate = lstm_ingate
    self.lstm_forgetgate = lstm_forgetgate
    self.lstm_cell = lstm_cell
    self.lstm_outgate = lstm_outgate
    # RNN specific params
    self.rnn_W_in_to_hid = rnn_W_in_to_hid
    self.rnn_W_hid_to_hid = rnn_W_hid_to_hid
    self.name = name
    self.sublayers = []

    expand_height = expand_width = 1

    # Input ConvLayers
    l_conv = l_in
    if isinstance(in_nfilters, Iterable) and not isinstance(in_nfilters, str):
        for i, (nf, f_size, stride) in enumerate(
                zip(in_nfilters, in_filters_size, in_filters_stride)):
            l_conv = ConvLayer(
                l_conv,
                num_filters=nf,
                filter_size=f_size,
                stride=stride,
                W=in_W_init,
                b=in_b_init,
                pad='valid',
                name=self.name + '_input_conv_layer' + str(i)
            )
            self.sublayers.append(l_conv)
            self.hypotetical_fm_size = (
                (self.hypotetical_fm_size - 1) * stride + f_size)
            # TODO This is right only if stride == filter...
            expand_height *= f_size[0]
            expand_width *= f_size[1]

            # Print shape
            out_shape = get_output_shape(l_conv)
            print('ConvNet: After in-convnet: {}'.format(out_shape))

    # Pretrained vgg16
    elif type(in_nfilters) == str:
        from vgg16 import Vgg16Layer
        l_conv = Vgg16Layer(l_in, self.in_nfilters, False, False)
        hypotetical_fm_size /= 8
        expand_height = expand_width = 8
        self.sublayers.append(l_conv)

        # Print shape
        out_shape = get_output_shape(l_conv)
        print('Vgg: After vgg: {}'.format(out_shape))

    # ReNet layers
    l_renet = l_conv
    for lidx in xrange(n_layers):
        l_renet = ReNetLayer(l_renet,
                             patch_size=(pwidth[lidx], pheight[lidx]),
                             n_hidden=dim_proj[lidx],
                             stack_sublayers=stack_sublayers[lidx],
                             RecurrentNet=RecurrentNet,
                             nonlinearity=nonlinearity,
                             hid_init=hid_init,
                             grad_clipping=grad_clipping,
                             precompute_input=precompute_input,
                             mask_input=mask_input,
                             # GRU specific params
                             gru_resetgate=gru_resetgate,
                             gru_updategate=gru_updategate,
                             gru_hidden_update=gru_hidden_update,
                             gru_hid_init=gru_hid_init,
                             # LSTM specific params
                             lstm_ingate=lstm_ingate,
                             lstm_forgetgate=lstm_forgetgate,
                             lstm_cell=lstm_cell,
                             lstm_outgate=lstm_outgate,
                             # RNN specific params
                             rnn_W_in_to_hid=rnn_W_in_to_hid,
                             rnn_W_hid_to_hid=rnn_W_hid_to_hid,
                             rnn_b=rnn_b,
                             batch_norm=batch_norm,
                             name=self.name + '_renet' + str(lidx))
        self.sublayers.append(l_renet)
        self.hypotetical_fm_size /= (pwidth[lidx], pheight[lidx])

        # Print shape
        out_shape = get_output_shape(l_renet)
        if stack_sublayers:
            msg = 'ReNet: After 2 rnns {}x{}@{} and 2 rnns 1x1@{}: {}'
            print(msg.format(pheight[lidx], pwidth[lidx], dim_proj[lidx],
                             dim_proj[lidx], out_shape))
        else:
            print('ReNet: After 4 rnns {}x{}@{}: {}'.format(
                pheight[lidx], pwidth[lidx], dim_proj[lidx], out_shape))

        # 1x1 conv layer : dimensionality reduction layer
        if conv_dim_red:
            l_renet = lasagne.layers.Conv2DLayer(
                l_renet,
                num_filters=dim_proj[lidx],
                filter_size=(1, 1),
                W=lasagne.init.GlorotUniform(),
                b=lasagne.init.Constant(0.),
                pad='valid',
                nonlinearity=conv_dim_red_nonlinearity,
                name=self.name + '_1x1_conv_layer' + str(lidx)
            )

            # Print shape
            out_shape = get_output_shape(l_renet)
            print('Dim reduction: After 1x1 convnet: {}'.format(out_shape))

    # Upsampling
    if out_upsampling_type == 'autograd':
        raise NotImplementedError(
            'This will not work as the dynamic cropping will crop '
            'part of the image.')
        nlayers = len(out_nfilters)
        assert nlayers > 1

        # Compute the upsampling ratio and the corresponding params
        h2 = np.array((100., 100.))
        up_ratio = (h2 / self.hypotetical_fm_size) ** (1. / nlayers)
        h1 = h2 / up_ratio
        h0 = h1 / up_ratio
        stride = to_int(ceildiv(h2 - h1, h1 - h0))
        filter_size = to_int(ceildiv((h1 * (h1 - 1) + h2 - h2 * h0),
                                     (h1 - h0)))

        target_shape = get_output(l_renet).shape[2:]
        l_upsampling = l_renet
        for l in range(nlayers):
            target_shape = target_shape * up_ratio
            l_upsampling = TransposedConv2DLayer(
                l_upsampling,
                num_filters=out_nfilters[l],
                filter_size=filter_size,
                stride=stride,
                W=out_W_init,
                b=out_b_init,
                nonlinearity=out_nonlinearity)
            self.sublayers.append(l_upsampling)
            up_shape = get_output(l_upsampling).shape[2:]

            # Print shape
            out_shape = get_output_shape(l_upsampling)
            print('Transposed autograd: {}x{} (str {}x{}) @ {}:{}'.format(
                filter_size[0], filter_size[1], stride[0], stride[1],
                out_nfilters[l], out_shape))

            # CROP
            # pad in TransposeConv2DLayer cannot be a tensor --> we cannot
            # crop unless we know in advance by how much!
            crop = T.max(T.stack([up_shape - target_shape, T.zeros(2)]),
                         axis=0)
            crop = crop.astype('uint8')  # round down
            l_upsampling = CropLayer(
                l_upsampling,
                crop,
                data_format='bc01')
            self.sublayers.append(l_upsampling)

            # Print shape
            print('Dynamic cropping')

    elif out_upsampling_type == 'grad':
        l_upsampling = l_renet
        for i, (nf, f_size, stride) in enumerate(zip(
                out_nfilters, out_filters_size, out_filters_stride)):
            l_upsampling = TransposedConv2DLayer(
                l_upsampling,
                num_filters=nf,
                filter_size=f_size,
                stride=stride,
                crop=0,
                W=out_W_init,
                b=out_b_init,
                nonlinearity=out_nonlinearity)
            self.sublayers.append(l_upsampling)

            if batch_norm:
                l_upsampling = lasagne.layers.batch_norm(
                    l_upsampling,
                    axes='auto')
                self.sublayers.append(l_upsampling)
                print("Batch normalization after Grad layer ")

            # Print shape
            out_shape = get_output_shape(l_upsampling)
            print('Transposed conv: {}x{} (str {}x{}) @ {}:{}'.format(
                f_size[0], f_size[1], stride[0], stride[1], nf, out_shape))

    elif out_upsampling_type == 'linear':
        # Go to b01c
        l_upsampling = lasagne.layers.DimshuffleLayer(
            l_renet,
            (0, 2, 3, 1),
            name=self.name + '_grad_undimshuffle')
        self.sublayers.append(l_upsampling)

        expand_height *= np.prod(pheight)
        expand_width *= np.prod(pwidth)
        l_upsampling = LinearUpsamplingLayer(l_upsampling,
                                             expand_height,
                                             expand_width,
                                             nclasses,
                                             batch_norm=batch_norm,
                                             name="linear_upsample_layer")
        self.sublayers.append(l_upsampling)
        print('Linear upsampling')

        if batch_norm:
            l_upsampling = lasagne.layers.batch_norm(
                l_upsampling,
                axes=(0, 1, 2))
            self.sublayers.append(l_upsampling)
            print("Batch normalization after Linear upsampling layer ")

        # Go back to bc01
        l_upsampling = lasagne.layers.DimshuffleLayer(
            l_upsampling,
            (0, 3, 1, 2),
            name=self.name + '_grad_undimshuffle')
        self.sublayers.append(l_upsampling)

    self.l_out = l_upsampling

    # HACK LASAGNE
    # This will set `self.input_layer`, which is needed by Lasagne to find
    # the layers with the get_all_layers() helper function in the
    # case of a layer with sublayers
    if isinstance(self.l_out, tuple):
        self.input_layer = None
    else:
        self.input_layer = self.l_out
def multi_divedable_pandigitals():
    start_permutation = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    for p in utils.permutations(start_permutation):
        if multi_div_test(p):
            yield utils.to_int(p)
            print p
def div_test(p, divisor):
    return utils.to_int(p) % divisor == 0