def tiny_yolo_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 model CNN body in keras.'''
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)
    x2 = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))([x2, x1])
    return Model(inputs, [y1, y2])
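# Nearly every snippet in this collection builds on a `compose` helper. In the
# keras-yolo3/YAD2K-style snippets it is a left-to-right composition of layer
# callables; a minimal sketch under that assumption (note that some of the
# other snippets below, e.g. the crypto and namedtuple utilities, appear to
# use the classic right-to-left convention instead):
from functools import reduce

def compose(*funcs):
    """Compose functions left to right: compose(f, g)(x) == g(f(x))."""
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    raise ValueError('Composition of empty sequence not supported.')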
def tiny_yolo_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 model CNN body in keras.'''
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)
    # At the end of the convolutional stack, the arguments passed in above
    # determine how the final, semantically meaningful feature map is
    # produced; they are ultimately fed into Keras's built-in
    # Conv2D(*args, **darknet_conv_kwargs). What exactly that does deserves
    # further study.
    x2 = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))([x2, x1])
    return Model(inputs, [y1, y2])
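# The comment above asks what DarknetConv2D forwards to Conv2D. A minimal
# sketch of the wrapper as it appears in keras-yolo3-style code (treat the
# exact defaults as an assumption, since the original module is not shown
# here):
from functools import wraps
from keras.layers import Conv2D
from keras.regularizers import l2

@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Conv2D with Darknet defaults: L2 weight decay, 'valid' padding for
    stride-2 downsampling convs and 'same' padding otherwise."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)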
def make_tiny_yolo_model(input, num_anchors_per_scale, num_classes):
    """Create Tiny YOLO_v3 model CNN body in keras."""
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)))(input)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors_per_scale * (5 + num_classes), (1, 1)))(x2)
    x2 = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors_per_scale * (5 + num_classes), (1, 1)))([x2, x1])
    return Model(input, [y1, y2])
# Python 2 code: izip/ifilterfalse come from itertools; extract_type and
# get_second are external helpers from the same module.
from collections import namedtuple
from functools import partial
from itertools import izip, ifilterfalse, starmap
from operator import attrgetter, contains, itemgetter


def tuplize(ntuple, attrs, obj):
    # Note: `compose` here is right-to-left, so the rightmost function in each
    # compose() call runs first.
    def apply_to_second(f, tupl):
        return (tupl.index, f(tupl.attr_type))

    def get_type(attr, val):
        AttrType = namedtuple("AttrType", ["attr", "val", "type"])
        return AttrType._make((attr, val, ntuple._field_types.get(attr)))

    def to_tuplize(attr):
        if isinstance(attr.type, list):
            return hasattr(attr.type[0], "_fields")
        return hasattr(attr.type, "_fields")

    def tuplize_recur(attr):
        is_collection, attr_type = extract_type(attr.type)
        if is_collection:
            # Recurse with the element type, not the list type itself.
            return map(partial(tuplize, attr_type, attr_type._fields), attr.val)
        return tuplize(attr.type, attr.type._fields, attr.val)

    attr_vals = map(partial(getattr, obj), attrs)
    if not hasattr(ntuple, "_field_types"):
        return ntuple._make(attr_vals)
    attrs_with_types = compose(
        partial(map, namedtuple("IndxAttrType", ["index", "attr_type"])._make),
        enumerate,
        partial(starmap, get_type),
        izip,
    )(attrs, attr_vals)
    attrs_to_tuplize = filter(
        compose(to_tuplize, attrgetter("attr_type")), attrs_with_types)
    attrs_not_tuplize = map(
        partial(apply_to_second, attrgetter("val")),
        ifilterfalse(partial(contains, attrs_to_tuplize), attrs_with_types))
    tuplized_attrs = map(partial(apply_to_second, tuplize_recur), attrs_to_tuplize)
    # apply_to_second yields plain (index, value) tuples, so sort by the first
    # element rather than by an `index` attribute.
    attr_vals = map(
        get_second,
        sorted(attrs_not_tuplize + tuplized_attrs, key=itemgetter(0)))
    return ntuple._make(attr_vals)
def last_layers(inputs_tensor, filters, num_predictions):
    """Make the prediction head for yolo_body."""
    # Build a fresh layer per position: reusing a single layer instance inside
    # compose() would tie weights across the stack.
    def conv1():
        return FullConv(filters, (1, 1), padding='same')

    def conv2():
        return FullConv(filters * 2, (3, 3), padding='same')

    predictions = Conv2D(num_predictions, (1, 1), padding='same',
                         kernel_regularizer=l2(5e-4), use_bias=False)
    x = compose(conv1(), conv2(), conv1(), conv2(), conv1())(inputs_tensor)
    y = compose(conv2(), predictions)(x)
    return x, y
def step(self):
    for p, h in self.ret_params():
        if p not in self.state:
            self.state[p] = {}
            update(self.state[p], self.stats, lambda o: o.init_state(p))
        state = self.state[p]
        for stat in self.stats:
            state = stat.update(state, p, **h)
        compose(p, self.steppers, **state, **h)
        self.state[p] = state
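# `compose` in the optimizer above follows a different convention from the
# Keras snippets: it threads a value through a list of functions instead of
# returning a composed function. A minimal sketch, assuming the
# fastai-course-style signature this code appears to use:
def compose(x, funcs, *args, order_key='_order', **kwargs):
    """Apply each function in funcs to x in turn, passing kwargs along.
    Functions are sorted by an optional _order attribute (default 0)."""
    key = lambda o: getattr(o, order_key, 0)
    for f in sorted(funcs, key=key):
        x = f(x, **kwargs)
    return x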
def make_last_layers(x, num_filters, out_filters):
    """6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer"""
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
    y = compose(
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))(x)
    return x, y
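# Usage sketch for the head above, with illustrative numbers: for COCO-style
# YOLOv3 (3 anchors per scale, 80 classes) the head emits 3 * (80 + 5) = 255
# channels. Assumes the Keras imports and Darknet helpers shown elsewhere in
# this collection.
from keras.layers import Input

feats = Input(shape=(13, 13, 1024))  # backbone output at stride 32
x, y = make_last_layers(feats, 512, 3 * (80 + 5))
# x has shape (None, 13, 13, 512) and feeds the upsample branch;
# y has shape (None, 13, 13, 255) and is the raw detection map for this scale.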
def build(input_shape, num_outputs, block_type, repetitions, filter=64, k=1):
    '''Factory function that builds a ResNet model.

    Arguments:
        input_shape: shape of the input
        num_outputs: number of network outputs
        block_type : type of residual block ('basic' or 'bottleneck')
        repetitions: how many times to repeat each residual block
    '''
    # Pick the residual-block builder according to block_type.
    if block_type == 'basic':
        block_fn = basic_block
    elif block_type == 'bottleneck':
        block_fn = bottleneck_block
    else:
        raise ValueError("block_type must be 'basic' or 'bottleneck'")

    # Build the model.
    input = layers.Input(shape=input_shape)

    # conv1 (conv -> batch normalization -> ReLU)
    conv1 = utils.compose(
        ResNetConv2D(filters=filter, kernel_size=(7, 7), strides=(2, 2),
                     input_shape=input_shape),
        layers.BatchNormalization(),
        layers.Activation('relu'))(input)

    # pool
    pool1 = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                                padding='same')(conv1)

    # conv2_x, conv3_x, conv4_x, conv5_x
    block = pool1
    for i, r in enumerate(repetitions):
        block = residual_blocks(block_fn, filters=filter * k, repetitions=r,
                                is_first_layer=(i == 0))(block)
        filter *= 2

    # batch normalization -> ReLU
    block = utils.compose(layers.BatchNormalization(),
                          layers.Activation('relu'))(block)

    # global average pooling
    pool2 = layers.GlobalAveragePooling2D()(block)

    # dense
    fc1 = layers.Dense(units=num_outputs, kernel_initializer='he_normal',
                       activation='softmax')(pool2)

    return models.Model(inputs=input, outputs=fc1)
def bn_relu_conv(*args, add_dropout=False, **kwargs):
    '''Build a batch normalization -> ReLU -> conv block.'''
    # add_dropout is keyword-only so that positional conv arguments cannot
    # accidentally bind to it.
    if add_dropout:
        return utils.compose(layers.BatchNormalization(),
                             layers.Activation('relu'),
                             layers.Dropout(rate=0.35),
                             ResNetConv2D(*args, **kwargs))
    return utils.compose(layers.BatchNormalization(),
                         layers.Activation('relu'),
                         ResNetConv2D(*args, **kwargs))
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))
    x, y1 = make_last_layers(darknet.output, 512, num_anchors * (num_classes + 5))
    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[152].output])
    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))
    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))
    return Model(inputs, [y1, y2, y3])
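# Usage sketch for the three-scale YOLOv3 body above. With a 416x416 input,
# the three outputs come out at strides 32, 16 and 8, i.e. 13x13, 26x26 and
# 52x52 grids (3 anchors per scale and 80 classes assumed for illustration):
from keras.layers import Input

image_input = Input(shape=(416, 416, 3))
model = yolo_body(image_input, num_anchors=3, num_classes=80)
# model.output_shape is [(None, 13, 13, 255), (None, 26, 26, 255),
#                        (None, 52, 52, 255)]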
def yolo_body(inputs_tensor, num_anchors, num_classes):
    """Create Yolo body which outputs the predictions of the 3 last stages
    of darknet."""
    darknet = tf.keras.Model(inputs_tensor, darknet_body(inputs_tensor))
    x, y1 = last_layers(darknet.output, 512, num_anchors * (num_classes + 5))
    x = compose(FullConv(256, (1, 1), padding='same'), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[-2].output])
    x = FullConv(512, (3, 3), padding='same')(x)
    x, y2 = last_layers(x, 256, num_anchors * (5 + num_classes))
    x = compose(FullConv(128, (1, 1), padding='same'), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[-3].output])
    x = FullConv(256, (3, 3), padding='same')(x)
    x, y3 = last_layers(x, 128, num_anchors * (num_classes + 5))
    return [y1, y2, y3]
def yolo_body(inputs, box_size, num_anchors, num_classes):
    """Create main body of YOLO model.

    Positional arguments:
        inputs       keras Input layer of images; should have shape (?, h, w, c)
        box_size     number of box parameters per anchor (typically 5:
                     x, y, w, h, objectness); type=int
        num_anchors  number of anchors; type=int
        num_classes  number of classes; type=int

    Returns:
        keras Model mapping `inputs` to the raw YOLOv2 detection tensor with
        num_anchors * (num_classes + box_size) channels.
    """
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = utils.compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)
    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(space_to_depth_x2,
                             output_shape=space_to_depth_x2_output_shape,
                             name='space_to_depth')(conv21)
    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + box_size), (1, 1))(x)
    return Model(inputs, x)
def yolo_body(inputs, num_anchors, num_classes, batch_size, stage):
    """Create YOLO_V2 model CNN body in Keras."""
    if stage not in (0, 1, 2):
        # Raise instead of printing, otherwise `network` would be unbound below.
        raise ValueError("Incorrect Stage: {}".format(stage))
    if stage == 0:
        network = yolo_lstm_stage_1(inputs, num_anchors, num_classes,
                                    stateful=False)
    else:  # stages 1 and 2 share the same darknet backbone
        network = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D_BN_Leaky(512, (3, 3)))(network.output)
    conv13 = network.get_layer('middle_layer').output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(space_to_depth_x2,
                             output_shape=space_to_depth_x2_output_shape,
                             name='space_to_depth')(conv21)
    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(512, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
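# Both YOLOv2 variants above route space_to_depth_x2 through a Lambda layer.
# A minimal sketch of the pair as it appears in YAD2K-style code (assumed
# here, since the originals are not shown; TF2 renames the op to
# tf.nn.space_to_depth):
import tensorflow as tf

def space_to_depth_x2(x):
    """Thin wrapper for tf.space_to_depth with block_size=2."""
    return tf.space_to_depth(x, block_size=2)

def space_to_depth_x2_output_shape(input_shape):
    """Static output-shape hint for the Lambda layer above."""
    if input_shape[1]:
        return (input_shape[0], input_shape[1] // 2, input_shape[2] // 2,
                4 * input_shape[3])
    return (input_shape[0], None, None, 4 * input_shape[3])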
def get_insights(ad_id):
    """Fetch today's spend/actions insights for a Facebook ad."""
    time_obj = {'since': time.strftime('%Y-%m-%d', time.localtime(time.time()))}
    time_obj['until'] = time_obj['since']
    query = tool.compose({'time_range': json.dumps(time_obj),
                          'fields': 'spend,actions'})
    out = url_get(tool.FB_HOST_URL + ad_id + '/insights?{}'.format(query))
    json_out = json.loads(out)
    return tool.get_insights_by_json(json_out)
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras from darknet53 features."""
    feat1, feat2, feat3 = darknet53(inputs)
    darknet = Model(inputs, feat3)
    x, y1 = make_last_layers(darknet.output, 512, num_anchors * (num_classes + 5))
    # The upsampling layer belongs inside compose(); the composed pipeline is
    # then applied to x.
    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, feat2])
    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))
    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, feat1])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))
    return Model(inputs, [y1, y2, y3])
def _infer(model, root_path, test_loader=None):
    if test_loader is None:
        test_loader = data_loader(
            root=os.path.join(root_path, 'test_data'), phase='test')
    x_hats = []
    fnames = []
    desc = 'infer...'
    with torch.no_grad():
        for data in tqdm(test_loader, desc=desc, total=len(test_loader),
                         disable=use_nsml):
            if isinstance(test_loader.dataset, torch.utils.data.dataset.Subset):
                fname, x_input, mask, _ = data
            else:
                fname, x_input, mask = data
            x_input = x_input.cuda()
            mask = mask.cuda()
            x_mask = torch.cat([x_input, mask], dim=1)
            x_hat = model(x_mask)
            x_hat = compose(x_input, x_hat, mask)
            x_hats.append(x_hat.cpu())
            fnames = fnames + list(fname)
    x_hats = torch.cat(x_hats, dim=0)
    return fnames, x_hats
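# In the inpainting snippets, `compose` is image composition rather than
# function composition: predicted pixels fill the masked region while
# unmasked pixels come straight from the input. A minimal sketch, assuming
# mask == 1 marks the holes to fill (the actual convention in this codebase
# is not shown):
import torch

def compose(x_input, x_hat, mask):
    """Blend: keep x_input where mask == 0, take x_hat where mask == 1."""
    return x_input * (1 - mask) + x_hat * mask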
def main():
    args = get_args()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = Inpaint()
    model = model.to(device)
    optim = torch.optim.Adam(model.parameters(), lr=args.lr,
                             betas=(args.beta1, args.beta2))
    save, load = bind_nsml(model, optim)
    if args.pause == 1:
        nsml.paused(scope=locals())

    if args.mode == 'train':
        path_train = os.path.join(dir_data_root, 'train')
        path_train_data = os.path.join(dir_data_root, 'train', 'train_data')
        tr_loader, val_loader = data_loader_with_split(
            path_train, batch_size=args.batch_size)
        postfix = dict()
        total_step = 0
        for epoch in trange(args.num_epochs, disable=use_nsml):
            pbar = tqdm(enumerate(tr_loader), total=len(tr_loader),
                        disable=use_nsml)
            for step, (_, x_input, mask, x_GT) in pbar:
                total_step += 1
                x_GT = x_GT.to(device)
                x_input = x_input.to(device)
                mask = mask.to(device)
                x_mask = torch.cat([x_input, mask], dim=1)
                model.zero_grad()
                x_hat = model(x_mask)
                x_composed = compose(x_input, x_hat, mask)
                loss = l1_loss(x_composed, x_GT)
                loss.backward()
                optim.step()
                postfix['loss'] = loss.item()
                if use_nsml:
                    postfix['epoch'] = epoch
                    postfix['step_'] = step
                    postfix['total_step'] = total_step
                    postfix['steps_per_epoch'] = len(tr_loader)
                if step % args.eval_every == 0:
                    vutils.save_image(x_GT, 'x_GT.png', normalize=True)
                    vutils.save_image(x_input, 'x_input.png', normalize=True)
                    vutils.save_image(x_hat, 'x_hat.png', normalize=True)
                    vutils.save_image(mask, 'mask.png', normalize=True)
                    metric_eval = local_eval(model, val_loader, path_train_data)
                    postfix['metric_eval'] = metric_eval
                if use_nsml:
                    if step % args.print_every == 0:
                        print(postfix)
                    nsml.report(**postfix, scope=locals(), step=total_step)
                else:
                    pbar.set_postfix(postfix)
            if use_nsml:
                nsml.save(epoch)
            else:
                save(epoch)
def yolo_body_test(inputs_tensor, num_anchors, num_classes):
    """Create Yolo body which outputs the predictions of the 3 last stages
    of darknet. ONLY USED FOR TESTING PURPOSES."""
    darknet = darknet_body_test(inputs_tensor)
    x, y1 = last_layers(darknet[0], 512, num_anchors * (num_classes + 5))
    x = compose(FullConv(256, (1, 1), padding='same'), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet[1]])
    x = FullConv(512, (3, 3), padding='same')(x)
    x, y2 = last_layers(x, 256, num_anchors * (5 + num_classes))
    x = compose(FullConv(128, (1, 1), padding='same'), UpSampling2D(2))(x)
    x = Concatenate()([x, darknet[-1]])
    x = FullConv(256, (3, 3), padding='same')(x)
    x, y3 = last_layers(x, 128, num_anchors * (num_classes + 5))
    return [y1, y2, y3]
def darknet_body():
    """Generate first 18 conv layers of Darknet-19."""
    return compose(
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(),
        bottleneck_block(128, 64),
        MaxPooling2D(),
        bottleneck_block(256, 128),
        MaxPooling2D(),
        bottleneck_x2_block(512, 256),
        MaxPooling2D(),
        bottleneck_x2_block(1024, 512))
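# The bottleneck helpers used above are, in YAD2K-style code, short compose
# chains of 3x3 and 1x1 convolutions; a sketch under that assumption:
def bottleneck_block(outer_filters, bottleneck_filters):
    """3x3 -> 1x1 -> 3x3 convolution block (3 conv layers)."""
    return compose(
        DarknetConv2D_BN_Leaky(outer_filters, (3, 3)),
        DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))

def bottleneck_x2_block(outer_filters, bottleneck_filters):
    """bottleneck_block plus another 1x1 -> 3x3 pair (5 conv layers)."""
    return compose(
        bottleneck_block(outer_filters, bottleneck_filters),
        DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(outer_filters, (3, 3)))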
def darknet_body_3d():
    """Generate first 18 conv layers of Darknet-19 (3D variant)."""
    return utils.compose(
        DarknetConv3D_BN_Leaky(16, (3, 3, 3)),
        MaxPooling3D(),
        DarknetConv3D_BN_Leaky(32, (3, 3, 3)),
        MaxPooling3D(),
        bottleneck_3d_block(64, 32),
        MaxPooling3D(),
        bottleneck_3d_block(128, 64),
        MaxPooling3D(),
        bottleneck_3d_x2_block(256, 128),
        MaxPooling3D(),
        bottleneck_3d_x2_block(512, 256))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    # Every conv layer is followed by BN and LeakyReLU; BN conventionally sits
    # before the activation. The conv has no bias term because BN's learned
    # beta offset makes a separate bias redundant.
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
def reducer(self, func, expr, itertype='', **kwargs):
    """Reduce the iterable returned by `evals(expr)` to a single value
    using the reducer `func`.
    """
    # if callable(expr):
    #     # expr is a partial ready to call
    #     return compose(func, expr, **kwargs)
    # else:
    return compose(func, self.partial(expr, itertype=itertype), **kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU.

    BatchNormalization normalizes the activations of the previous layer at
    each batch, i.e. applies a transformation that maintains the mean
    activation close to 0 and the activation standard deviation close to 1.
    """
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
def preprocess_dataset(dataset_t, dataset_v):
    if negative_filter:
        filter_pipeline = compose(
            FilterNegative().fit_transform,
            onehot_encoder.fit_transform,
        )
    else:
        filter_pipeline = compose(onehot_encoder.fit_transform)
    if feature_normalizer:
        return (
            feature_normalizer.fit_transform(filter_pipeline(dataset_t)),
            feature_normalizer.fit_transform(filter_pipeline(dataset_v)),
        )
    return (
        filter_pipeline(dataset_t),
        filter_pipeline(dataset_v),
    )
def resblock_body(x, num_filters, num_blocks):
    """A series of resblocks starting with a downsampling Convolution2D"""
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
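# In keras-yolo3-style code, resblock_body is stacked into the Darknet-53
# backbone roughly as follows (a sketch, assuming that layout; 1 stem conv
# plus 5 stages gives the usual 52 body convolutions):
def darknet_body(x):
    """Darknet-53 body: stem conv followed by 5 resblock stages."""
    x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)
    x = resblock_body(x, 64, 1)
    x = resblock_body(x, 128, 2)
    x = resblock_body(x, 256, 8)
    x = resblock_body(x, 512, 8)
    x = resblock_body(x, 1024, 4)
    return x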
def darknet_body():
    """Generate first 18 conv layers of Darknet-19.

    Layer structure as reported when parsing the Darknet cfg (note that the
    last two convolutions, convolutional_18 and convolutional_19, belong to
    the detection head and are not part of this body):

        convolutional_0   conv2d bn leaky (3, 3, 3, 32)
        maxpool_0
        convolutional_1   conv2d bn leaky (3, 3, 32, 64)
        maxpool_1
        convolutional_2   conv2d bn leaky (3, 3, 64, 128)
        convolutional_3   conv2d bn leaky (1, 1, 128, 64)
        convolutional_4   conv2d bn leaky (3, 3, 64, 128)
        maxpool_2
        convolutional_5   conv2d bn leaky (3, 3, 128, 256)
        convolutional_6   conv2d bn leaky (1, 1, 256, 128)
        convolutional_7   conv2d bn leaky (3, 3, 128, 256)
        maxpool_3
        convolutional_8   conv2d bn leaky (3, 3, 256, 512)
        convolutional_9   conv2d bn leaky (1, 1, 512, 256)
        convolutional_10  conv2d bn leaky (3, 3, 256, 512)
        convolutional_11  conv2d bn leaky (1, 1, 512, 256)
        convolutional_12  conv2d bn leaky (3, 3, 256, 512)
        maxpool_4
        convolutional_13  conv2d bn leaky (3, 3, 512, 1024)
        convolutional_14  conv2d bn leaky (1, 1, 1024, 512)
        convolutional_15  conv2d bn leaky (3, 3, 512, 1024)
        convolutional_16  conv2d bn leaky (1, 1, 1024, 512)
        convolutional_17  conv2d bn leaky (3, 3, 512, 1024)
        convolutional_18  conv2d bn leaky (3, 3, 1024, 1024)
        convolutional_19  conv2d bn leaky (3, 3, 1024, 1024)
    """
    return compose(
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(),
        bottleneck_block(128, 64),
        MaxPooling2D(),
        bottleneck_block(256, 128),
        MaxPooling2D(),
        bottleneck_x2_block(512, 256),
        MaxPooling2D(),
        bottleneck_x2_block(1024, 512))
def make_yolo_model(input, num_anchors_per_scale, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    b1, b2, b3 = darknet_body(input)
    x, y1 = make_last_layers(b1, 512, num_anchors_per_scale * (5 + num_classes))
    x = compose(DarknetConv2D_BN_Leaky(256, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, b2])
    x, y2 = make_last_layers(x, 256, num_anchors_per_scale * (5 + num_classes))
    x = compose(DarknetConv2D_BN_Leaky(128, (1, 1)), UpSampling2D(2))(x)
    x = Concatenate()([x, b3])
    _, y3 = make_last_layers(x, 128, num_anchors_per_scale * (5 + num_classes))
    return Model(input, [y1, y2, y3])
def _get_sql_data(self, data):
    """Return a tuple of (columns, values)."""
    em = EntityManager()
    db = em.get_db()
    config = Config()
    charset = config.db.charset if "charset" in config.db.keys() else "utf8"
    converter = MySQLConverter(charset)

    def none_to_null(val):
        if val is None:
            return "NULL"
        return val

    def quote(val):
        if isinstance(val, NUMERIC_TYPES):
            return str(val)
        return "'" + val + "'"

    def quote_col(val):
        return "`" + val + "`"

    _escape_value = compose(none_to_null, quote, converter.escape)
    # Column quoting (backticks) differs from normal value quoting.
    _escape_column = compose(none_to_null, quote_col, converter.escape)

    if self.key_name not in data.keys() and self.key is not None:
        # Add the key to the data.
        data[self.key_name] = self.key

    columns = list()
    values = list()
    for k, v in data.items():
        values.append(_escape_value(v))
        columns.append(_escape_column(k))
    return (columns, values)
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode: explicit
    # left/top zero-padding plus a 'valid' 3x3 stride-2 conv reproduces
    # Darknet's asymmetric downsampling exactly, whereas 'same' padding would
    # pad symmetrically and misalign the feature map relative to the original
    # weights.
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
def feature_transform_date_account_created(df):
    df_result = apply_on(df, "date_account_created", compose(parse, str))
    df_result[DATE_ACCOUNT_CREATED_YEAR] = \
        df_result.date_account_created.apply(lambda x: x.year)
    df_result[DATE_ACCOUNT_CREATED_MONTH] = \
        df_result.date_account_created.apply(lambda x: x.month)
    df_result[DATE_ACCOUNT_CREATED_DAY] = \
        df_result.date_account_created.apply(lambda x: x.day)
    df_result[DATE_ACCOUNT_CREATED_SEASON] = \
        df_result.date_account_created.apply(
            lambda x: day_of_year_to_season(x.timetuple().tm_yday))
    return df_result
def feature_transform_first_active(df_train):
    df_result = apply_on(df_train, "timestamp_first_active", compose(parse, str))
    df_result[TRANSFORM_FIRST_ACTIVE_YEAR] = \
        df_result.timestamp_first_active.apply(lambda x: x.year)
    df_result[TRANSFORM_FIRST_ACTIVE_MONTH] = \
        df_result.timestamp_first_active.apply(lambda x: x.month)
    df_result[TRANSFORM_FIRST_ACTIVE_DAY] = \
        df_result.timestamp_first_active.apply(lambda x: x.day)
    df_result[TRANSFORM_FIRST_ACTIVE_SEASON] = \
        df_result.timestamp_first_active.apply(
            lambda x: day_of_year_to_season(x.dayofyear))
    return df_result
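# Both transforms above funnel through an `apply_on` helper with
# compose(parse, str), which under the classic right-to-left convention this
# codebase appears to use applies str first and then dateutil's parse. The
# helper itself is not shown; a hypothetical sketch, assuming it maps a
# function over one column and returns a copy:
def apply_on(df, column, func):
    """Return a copy of df with func applied elementwise to df[column]."""
    result = df.copy()
    result[column] = result[column].apply(func)
    return result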
def impute_dataset(dataset_t, dataset_v):
    if stat_impute_type in ['mean', 'median']:
        stat_impute_func = BasicImputation
    if stat_impute_type in ['mice', 'missforest', 'knn']:
        stat_impute_func = StandardImputation
    if stat_impute_type in ['gain']:
        stat_impute_func = NNImputation

    if temp_impute_type in ['mean', 'median']:
        temp_impute_func = BasicImputation
        temp_impute_dict = {
            'imputation_model_name': temp_impute_type,
            'data_type': 'temporal',
        }
    if temp_impute_type in ['linear', 'quadratic', 'cubic', 'spline']:
        temp_impute_func = Interpolation
        temp_impute_dict = {
            'interpolation_model_name': temp_impute_type,
            'data_type': 'temporal',
        }
    if temp_impute_type in ['mrnn', 'tgain']:
        temp_impute_func = NNImputation
        temp_impute_dict = {
            'imputation_model_name': temp_impute_type,
            'data_type': 'temporal',
        }

    static_imputation = stat_impute_func(
        imputation_model_name=stat_impute_type,
        data_type='static',
    )
    temporal_imputation = temp_impute_func(**temp_impute_dict)
    imputation_pipeline = compose(
        static_imputation.fit_transform,
        temporal_imputation.fit_transform,
    )
    return (
        imputation_pipeline(dataset_t),
        imputation_pipeline(dataset_v),
    )
def clean_age(df):
    """Clean up age data in the dataframe df, adding a new series with
    column name age_interval.
    """
    LOW_RANGE = 14
    HIGH_RANGE = 90

    def year_to_age(year):
        return CURRENT_YEAR - year

    # Values above 900 are birth years rather than ages, so the pipeline
    # first converts them, then marks out-of-range ages as missing.
    result_df = apply_on(
        df, AGE_COLUMN,
        compose(lambda x: year_to_age(x) if x > 900 else x,
                lambda x: MISSING_VALUE if x >= HIGH_RANGE or x <= LOW_RANGE else x))
    result_df = replace_missing_values_with_mean(result_df, AGE_COLUMN)
    return result_df
def parse(self):
    """Parse the code."""
    # Python 2 code: map/filter return lists here.
    def is_fn(x):
        return x.startswith("fn_")

    fns = filter(is_fn, self.__dict__)
    self.fns = map(lambda x: x[3:], fns)

    def assign_line_nbr(line, linenbr):
        return (linenbr, line)

    def tokenize(x):
        try:
            code, value = versa_int(x)  # maybe a scalar
            assert code is True
            return (code, value)
        except AssertionError:
            if x in self.fns:
                return ("function", x)
            return ("unknown", x)

    def res(pair):
        linenbr, tokens = pair
        return (linenbr, map(tokenize, tokens))

    # We want non-empty lines.
    lines = self.code.split("\n")
    lines = filter(not_empty, lines)
    _lines = map(assign_line_nbr, lines, range(0, len(lines)))  # (linenbr, line_data)

    # Since the separator is the space, make sure comma-separated constructs
    # are treated correctly: compress ', ' to ','.
    f = compose([partial(freplace, ", ", ","), partial(fsplit, " ")])
    _sts = map(f, _lines)
    self.sts = filter(f_not_empty, _sts)
    self.xsts = map(res, self.sts)
    return self
def likely_strings(ct):
    # compose is right-to-left here: convert each decrypted array to a string,
    # then pair it with its English-likeness score; keep the best-scoring one.
    results = map(
        compose(lambda x: (x, likely_score(x)),
                lambda x: x.tostring()),
        [decrypt_with_byte(b, ct) for b in range(256)])
    return reduce(lambda acc, x: acc if acc[1] > x[1] else x, results)[0]
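# likely_score is not shown in this collection; a hypothetical sketch that
# would fit its use above: score a candidate plaintext by rough English
# character frequencies, higher meaning more plausible.
ENGLISH_FREQ = {
    'e': 12.7, 't': 9.1, 'a': 8.2, 'o': 7.5, 'i': 7.0, 'n': 6.7,
    's': 6.3, 'h': 6.1, 'r': 6.0, 'd': 4.3, 'l': 4.0, 'u': 2.8, ' ': 13.0,
}

def likely_score(s):
    """Sum per-character English-frequency weights over the string."""
    return sum(ENGLISH_FREQ.get(c.lower(), 0.0) for c in s)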
def p39():
    # Project Euler 39: for each perimeter p in [2, 1000], collect the
    # right-triangle solutions (a, b, c) with a + b + c == p.
    all_triplets = map(compose(triplets, list), xrange(2, 1001))
    zipped = zip(map(len, all_triplets), all_triplets)
    # max picks the (count, triplets) pair with the most solutions; summing
    # the first triplet recovers its perimeter p = a + b + c.
    return sum(max(zipped)[1][0])
ngram_reader = NgramReader(args['ngram_filename'], vocab_size=args['vocab_size'],
                           train_proportion=args['train_proportion'],
                           test_proportion=args['test_proportion'])
testing_block = ngram_reader.testing_block()
vocabulary = ngram_reader.word_array
print 'corpus contains %i ngrams' % (ngram_reader.number_of_ngrams)
rng = np.random.RandomState(args['random_seed'])
data_rng = np.random.RandomState(args['random_seed'])
validation_rng = np.random.RandomState(args['random_seed'] + 1)
random.seed(args['random_seed'])
if not args['dont_run_semantic']:
    print 'loading semantic similarities'
    word_similarity = semantic_module.WordSimilarity(
        vocabulary, args['word_similarity_file'],
        memmap_filename=args['word_similarity_memmap'])
    print 'computing terms with semantic distance'
    indices_in_intersection = set(
        i for i, v in enumerate(map(compose(np.any, np.isfinite),
                                    word_similarity.word_pairwise_sims))
        if bool(v) and i != 0)  # exclude the rare word
else:
    indices_in_intersection = set()

# Construct the ADMM, possibly using some existing semantic or syntactic model.
if not model_loaded:
    existing_embeddings = None
    if args['existing_embedding_path']:
        with gzip.open(args['existing_embedding_path']) as f:
            embedding_model = cPickle.load(f)
        existing_embeddings = embedding_model.averaged_embeddings()
        print "loading existing model from %s" % (args['existing_embedding_path'])
from utils import compose
import array


def xor_arrays(a1, a2):
    return [a ^ b for (a, b) in zip(a1, a2)]


def to_bin(s):
    return ''.join(bin(x)[2:] for x in s)


def sum_bin_str(binstr):
    return reduce(lambda acc, x: acc + int(x), binstr, 0)


# Right-to-left composition: first expand the xored bytes to a bit string,
# then count the 1 bits. Dropped leading zeros do not affect the count.
sum_xored_strings = compose(sum_bin_str, to_bin)


def hamming_distance(a1, a2):
    return sum_xored_strings(xor_arrays(a1, a2))


def test():
    assert hamming_distance(array.array('B', 'hello'),
                            array.array('B', 'hello')) == 0
    assert hamming_distance(array.array('B', 'this is a test'),
                            array.array('B', 'wokka wokka!!!')) == 37