def forward(net, input_data, deploy=False):
    """Defines and creates the dish-classification network given the net,
    input data and configurations."""
    net.clear_forward()
    image = np.array(input_data["image"])
    if not deploy:
        label = np.array(input_data["label"])
        net.f(NumpyData("label", data=label))
    net.f(NumpyData("image", data=image))
    generate_decapitated_alexnet(net)
    net.f(InnerProduct(name="fc8_dish", bottoms=["fc7"],
                       param_lr_mults=[1.0 * 10, 2.0 * 10],
                       param_decay_mults=[1.0, 0.0],
                       weight_filler=Filler("gaussian", 0.01),
                       bias_filler=Filler("constant", 0.0),
                       num_output=128))
    net.f(Softmax("dish_probs", bottoms=["fc8_dish"]))
    if not deploy:
        net.f(SoftmaxWithLoss(name="loss", bottoms=["fc8_dish", "label"]))
        # net.f(Accuracy(name="dish_accuracy", bottoms=["fc8_dish_23", "label"]))
    if deploy:
        probs = np.array(net.blobs["dish_probs"].data)
        return probs
    return None

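# A minimal deploy-time sketch for the classifier above (an illustration, not
# part of the original code). Apollocaffe nets are defined by running the
# forward pass once, after which trained weights can be loaded; the weights
# path, batch shape, and AlexNet-style 227x227 input size are assumptions.
def demo_dish_deploy():
    import numpy as np
    import apollocaffe
    net = apollocaffe.ApolloNet()
    images = np.zeros((10, 3, 227, 227), dtype=np.float32)  # placeholder batch
    forward(net, {"image": images}, deploy=True)  # instantiate the layers
    net.load("dish_net.caffemodel")  # hypothetical weights file
    probs = forward(net, {"image": images}, deploy=True)
    print(probs.argmax(axis=1))  # most likely of the 128 dish classes per image
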
def generate_intermediate_layers(net):
    """Takes the output from the decapitated googlenet and transforms the output
    from a NxCxWxH to (NxWxH)xCx1x1 that is used as input for the lstm layers.
    N = batch size, C = channels, W = grid width, H = grid height."""
    net.f(Convolution("post_fc7_conv", bottoms=["inception_final_output"],
                      param_lr_mults=[1., 2.], param_decay_mults=[0., 0.],
                      num_output=1024, kernel_dim=(1, 1),
                      weight_filler=Filler("gaussian", 0.005),
                      bias_filler=Filler("constant", 0.)))
    net.f(Power("lstm_fc7_conv", scale=0.01, bottoms=["post_fc7_conv"]))
    net.f(Transpose("lstm_input", bottoms=["lstm_fc7_conv"]))

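# For intuition, the shape change described in the docstring can be mimicked
# in plain numpy (a sketch only; the Transpose layer performs the equivalent
# operation inside Caffe). The 4x4 grid size is a hypothetical example.
def demo_grid_to_lstm_shape():
    import numpy as np
    x = np.random.randn(2, 1024, 4, 4)  # NxCxWxH, matching post_fc7_conv's C=1024
    n, c, w, h = x.shape
    # Move channels last, then fold every grid cell into the batch axis,
    # yielding one Cx1x1 feature vector per grid cell: (NxWxH)xCx1x1.
    y = x.transpose(0, 2, 3, 1).reshape(n * w * h, c, 1, 1)
    assert y.shape == (2 * 4 * 4, 1024, 1, 1)
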
def forward(net, input_data, net_config, deploy=False):
    """Defines and creates the ReInspect network given the net, input data
    and configurations."""
    net.clear_forward()
    image = np.array(input_data["image"])
    if not deploy:
        box_flags = np.array(input_data["box_flags"])
        boxes = np.array(input_data["boxes"])
        numbers = np.array(input_data["numbers"])
    net.f(NumpyData("image", data=image))
    generate_decapitated_googlenet(net, net_config)
    generate_intermediate_layers(net)
    if not deploy:
        generate_ground_truth_layers(net, box_flags, boxes)
        generate_number_ground_truth_layers(net, numbers)
    generate_lstm_seeds(net, net_config["lstm_num_cells"])
    filler = Filler("uniform", net_config["init_range"])
    concat_bottoms = {"score": [], "bbox": []}
    lstm_params = (net_config["lstm_num_cells"], filler)
    for step in range(net_config["max_len"]):
        lstm_out = get_lstm_params(step)
        generate_lstm(net, step, lstm_params, lstm_out,
                      net_config["dropout_ratio"])
        generate_inner_products(net, step, filler)
        concat_bottoms["score"].append("ip_conf%d" % step)
        concat_bottoms["bbox"].append("ip_bbox%d" % step)
    net.f(Concat("score_concat", bottoms=concat_bottoms["score"], concat_dim=2))
    net.f(Concat("bbox_concat", bottoms=concat_bottoms["bbox"], concat_dim=2))
    generate_number_layers(net, step, filler, net_config["max_len"])
    if not deploy:
        generate_losses(net, net_config)
        generate_number_losses(net, net_config)
    if deploy:
        bbox = [np.array(net.blobs["ip_bbox%d" % j].data)
                for j in range(net_config["max_len"])]
        conf = [np.array(net.blobs["ip_soft_conf%d" % j].data)
                for j in range(net_config["max_len"])]
        num = np.array(net.blobs["ip_number"].data)
        return (bbox, conf, num)
    return None

def forward(net, input_data, net_config, deploy=False):
    net.clear_forward()
    image = np.array(input_data["image"])
    if not deploy:
        box_flags = np.array(input_data["box_flags"])
        boxes = np.array(input_data["boxes"])
    net.f(NumpyData("image", data=image))
    generate_decapitated_googlenet(net)
    generate_googlenet_to_lstm_layers(net)
    if not deploy:
        generate_ground_truth_layers(net, box_flags, boxes)
    generate_lstm_seeds(net, net_config["lstm_num_cells"])
    filler = Filler("uniform", net_config["init_range"])
    score_concat_bottoms = []
    bbox_concat_bottoms = []
    for step in range(net_config["max_len"]):
        hidden_bottom, mem_bottom = get_lstm_params(step)
        generate_lstm(net, step, net_config["lstm_num_cells"],
                      hidden_bottom, mem_bottom, filler,
                      net_config["dropout_ratio"])
        generate_inner_products(net, step, filler)
        score_concat_bottoms.append("ip_conf%d" % step)
        bbox_concat_bottoms.append("ip_bbox%d" % step)
    net.f(Concat("score_concat", bottoms=score_concat_bottoms, concat_dim=2))
    net.f(Concat("bbox_concat", bottoms=bbox_concat_bottoms, concat_dim=2))
    if not deploy:
        generate_losses(net)
    if deploy:
        bbox = [np.array(net.blobs["ip_bbox%d" % j].data)
                for j in range(net_config["max_len"])]
        conf = [np.array(net.blobs["ip_soft_conf%d" % j].data)
                for j in range(net_config["max_len"])]
        return (bbox, conf)
    return None

def evaluate_forward(net, net_config):
    net.clear_forward()
    length = 20
    net.f(NumpyData("prev_hidden", np.zeros((1, net_config["mem_cells"]))))
    net.f(NumpyData("prev_mem", np.zeros((1, net_config["mem_cells"]))))
    filler = Filler("uniform", net_config["init_range"])
    predictions = []
    value = 0.5
    for _ in range(length):
        # We'll be updating values in place for efficient memory usage. This
        # will break backprop and cause warnings. Use clear_forward to suppress.
        net.clear_forward()
        # Add 0.5 to the sum at each step
        net.f(NumpyData("value", data=np.array(value).reshape((1, 1))))
        prev_hidden = "prev_hidden"
        prev_mem = "prev_mem"
        net.f(Concat("lstm_concat", bottoms=[prev_hidden, "value"]))
        net.f(LstmUnit("lstm", net_config["mem_cells"],
                       bottoms=["lstm_concat", prev_mem],
                       param_names=["input_value", "input_gate",
                                    "forget_gate", "output_gate"],
                       weight_filler=filler,
                       tops=["next_hidden", "next_mem"]))
        net.f(InnerProduct("ip", 1, bottoms=["next_hidden"]))
        predictions.append(float(net.blobs["ip"].data.flatten()[0]))
        # set up for next prediction by copying LSTM outputs back to inputs
        net.blobs["prev_hidden"].data_tensor.copy_from(
            net.blobs["next_hidden"].data_tensor)
        net.blobs["prev_mem"].data_tensor.copy_from(
            net.blobs["next_mem"].data_tensor)
    targets = np.cumsum([value for _ in predictions])
    residuals = [x - y for x, y in zip(predictions, targets)]
    return targets, predictions, residuals

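# A usage sketch for the evaluator above (assuming apollocaffe; the config
# values are placeholders). A real run would train or load weights first;
# freshly initialized weights make the printed predictions purely illustrative.
def demo_sum_evaluation():
    import apollocaffe
    net_config = {"mem_cells": 50, "init_range": 0.1}  # hypothetical values
    net = apollocaffe.ApolloNet()
    targets, predictions, residuals = evaluate_forward(net, net_config)
    for t, p, r in zip(targets, predictions, residuals):
        print("target=%.2f  prediction=%.3f  residual=%+.3f" % (t, p, r))
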
def evaluate_forward(net, net_config, feat, scene_feat):
    net.clear_forward()
    feat_dim = feat.shape[1]
    net.f(NumpyData("prev_hidden", np.zeros((1, net_config["mem_cells"]))))
    net.f(NumpyData("prev_mem", np.zeros((1, net_config["mem_cells"]))))
    filler = Filler("uniform", net_config["init_range"])
    length = feat.shape[0] + 1
    for step in range(length):
        net.clear_forward()
        # Feed the scene feature first, then one row of feat per step.
        if step == 0:
            value = scene_feat.reshape(1, feat_dim)
        else:
            value = feat[step - 1, :].reshape(1, feat_dim)
        net.f(NumpyData("value", data=value))
        prev_hidden = "prev_hidden"
        prev_mem = "prev_mem"
        net.f(Concat("lstm_concat", bottoms=[prev_hidden, "value"]))
        net.f(LstmUnit("lstm", net_config["mem_cells"],
                       bottoms=["lstm_concat", prev_mem],
                       param_names=["input_value", "input_gate",
                                    "forget_gate", "output_gate"],
                       weight_filler=filler,
                       tops=["next_hidden", "next_mem"]))
        net.f(InnerProduct("ip", 1, bottoms=["next_hidden"]))
        # Copy the LSTM outputs back to the inputs for the next step.
        net.blobs["prev_hidden"].data_tensor.copy_from(
            net.blobs["next_hidden"].data_tensor)
        net.blobs["prev_mem"].data_tensor.copy_from(
            net.blobs["next_mem"].data_tensor)
    # Six independent two-way classifiers read the final hidden state.
    for i in range(6):
        net.f(InnerProduct("ip%d" % i, 2, bottoms=["next_hidden"]))
        net.f(Softmax("prob%d" % i, bottoms=["ip%d" % i]))
    predictions = []
    for i in range(6):
        # Probability of the positive class for each classifier.
        predictions.append(float(net.blobs["prob%d" % i].data.flatten()[1]))
    return predictions

def forward(net, net_config):
    net.clear_forward()
    length = random.randrange(net_config["min_len"], net_config["max_len"])
    # initialize all weights in [-init_range, init_range]
    filler = Filler("uniform", net_config["init_range"])
    # initialize the LSTM memory with all 0's
    net.f(NumpyData("lstm_seed",
                    np.zeros((net_config["batch_size"],
                              net_config["mem_cells"]))))
    accum = np.zeros((net_config["batch_size"],))
    # Begin recurrence through the randomly chosen number of inputs
    for step in range(length):
        # Generate random inputs
        value = np.array(
            [random.random() for _ in range(net_config["batch_size"])])
        # Set data of value blob to contain a batch of random numbers
        net.f(NumpyData("value%d" % step, value.reshape((-1, 1))))
        accum += value
        for l in lstm_layers(net, step, filler, net_config):
            net.f(l)
    # Add a fully connected layer with a bottom blob set to be the last used
    # LSTM cell. Note that the network structure is now a function of the data
    net.f(InnerProduct("ip", 1,
                       bottoms=["lstm_hidden%d" % (length - 1)],
                       weight_filler=filler))
    # Add a label for the sum of the inputs
    net.f(NumpyData("label", np.reshape(accum, (-1, 1))))
    # Compute the Euclidean loss between the prediction and the label,
    # used for backprop
    net.f(EuclideanLoss("euclidean", bottoms=["ip", "label"]))

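# A minimal training-loop sketch around the forward pass above (assuming
# apollocaffe; config values, iteration count, and learning rate are
# placeholders). forward() rebuilds the unrolled net every iteration, so the
# sequence length can change from batch to batch while parameters that share
# a name keep shared weights.
def demo_sum_training():
    import apollocaffe
    net_config = {"min_len": 5, "max_len": 15, "batch_size": 32,
                  "mem_cells": 100, "init_range": 0.1}  # hypothetical values
    net = apollocaffe.ApolloNet()
    for i in range(1000):
        forward(net, net_config)   # builds the graph and the euclidean loss
        net.backward()             # backprop through the unrolled graph
        net.update(lr=0.01)        # SGD step on the shared parameters
        if i % 100 == 0:
            print("iter %d, loss %f" % (i, net.loss))
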
def forward(net, input_data, net_config, deploy=False):
    """Defines and creates the word-prediction LSTM network given the net,
    input data and configurations."""
    net.clear_forward()
    net.f(NumpyData("wordvec_layer",
                    data=np.array(input_data["wordvec_layer"])))  # 128*38*100*1
    net.f(NumpyData("target_words",
                    data=np.array(input_data["target_words"])))  # 128*100*1*1

    # Slice the target words into one label blob per time step.
    tops = []
    slice_point = []
    for i in range(net_config['max_len']):
        tops.append('label%d' % i)
        if i != 0:
            slice_point.append(i)
    net.f(Slice("label_slice_layer", slice_dim=1,
                bottoms=["target_words"], tops=tops,
                slice_point=slice_point))

    # Slice the word vectors into one input blob per time step.
    tops = []
    slice_point = []
    for i in range(net_config['max_len']):
        tops.append('target_wordvec%d_4d' % i)
        if i != 0:
            slice_point.append(i)
    net.f(Slice("wordvec_slice_layer", slice_dim=2,
                bottoms=['wordvec_layer'], tops=tops,
                slice_point=slice_point))

    for i in range(net_config["max_len"]):
        # 128*38*1*1 -> 128*38
        net.f("""
            name: "target_wordvec%d"
            type: "Reshape"
            bottom: "target_wordvec%d_4d"
            top: "target_wordvec%d"
            reshape_param {
              shape {
                dim: 0  # copy the dimension from below
                dim: -1
              }
            }""" % (i, i, i))
        # net.f(Reshape('target_wordvec%d' % i,
        #               bottoms=['target_wordvec%d_4d' % i], shape=[0, -1]))

    filler = Filler("uniform", net_config["init_range"])
    for i in range(net_config['max_len']):
        if i == 0:
            # All-zero seeds for the hidden state and memory cell at step 0.
            net.f(NumpyData("dummy_layer",
                            np.zeros((net_config["batch_size"],
                                      net_config["lstm_num_cells"]))))
            net.f(NumpyData("dummy_mem_cell",
                            np.zeros((net_config["batch_size"],
                                      net_config["lstm_num_cells"]))))
        for j in range(net_config['lstm_num_stacks']):
            bottoms = []
            if j == 0:
                bottoms.append('target_wordvec%d' % i)
            if j >= 1:
                bottoms.append('dropout%d_%d' % (j - 1, i))
            if i == 0:
                bottoms.append("dummy_layer")
            else:
                bottoms.append('lstm%d_hidden%d' % (j, i - 1))
            net.f(Concat('concat%d_layer%d' % (j, i), bottoms=bottoms))
            param_names = []
            for k in range(4):
                param_names.append('lstm%d_param_%d' % (j, k))
            bottoms = ['concat%d_layer%d' % (j, i)]
            if i == 0:
                bottoms.append('dummy_mem_cell')
            else:
                bottoms.append('lstm%d_mem_cell%d' % (j, i - 1))
            net.f(LstmUnit('lstm%d_layer%d' % (j, i),
                           net_config["lstm_num_cells"],
                           weight_filler=filler,
                           param_names=param_names,
                           bottoms=bottoms,
                           tops=['lstm%d_hidden%d' % (j, i),
                                 'lstm%d_mem_cell%d' % (j, i)]))
            net.f(Dropout('dropout%d_%d' % (j, i),
                          net_config["dropout_ratio"],
                          bottoms=['lstm%d_hidden%d' % (j, i)]))

    # Concatenate the top-stack hidden states of all steps along the batch
    # axis and predict a word distribution for each step.
    bottoms = []
    for i in range(net_config['max_len']):
        bottoms.append('dropout%d_%d' % (net_config['lstm_num_stacks'] - 1, i))
    net.f(Concat('hidden_concat', bottoms=bottoms, concat_dim=0))
    net.f(InnerProduct("inner_product", net_config['vocab_size'],
                       bottoms=["hidden_concat"], weight_filler=filler))
    bottoms = []
    for i in range(net_config['max_len']):
        bottoms.append('label%d' % i)
    net.f(Concat('label_concat', bottoms=bottoms, concat_dim=0))
    if deploy:
        net.f(Softmax("word_probs", bottoms=["inner_product"]))
    else:
        net.f(SoftmaxWithLoss("word_loss",
                              bottoms=["inner_product", "label_concat"],
                              ignore_label=net_config['zero_symbol']))

def forward(net, input_data, net_config, phase='train', deploy=False):
    """Defines and creates the word-prediction LSTM network given the net,
    input data and configurations."""
    net.clear_forward()
    batch_ws_i = input_data["ws_i"]
    batch_stop_i = [net_config['max_len']] * net_config['batch_size']
    wordvec_layer = input_data["wordvec_layer"]  # 128*38*100*1
    net.f(NumpyData("target_words",
                    data=np.array(input_data["target_words"])))  # 128*100*1*1

    # Slice the target words into one label blob per time step.
    tops = []
    slice_point = []
    for i in range(net_config['max_len']):
        tops.append('label%d' % i)
        if i != 0:
            slice_point.append(i)
    net.f(Slice("label_slice_layer", slice_dim=1,
                bottoms=["target_words"], tops=tops,
                slice_point=slice_point))

    net.f(NumpyData("target_wordvec%d" % 0,
                    data=wordvec_layer[:, :, 0, 0]))  # start symbol, 128*38
    filler = Filler("uniform", net_config["init_range"])
    for i in range(net_config['max_len']):
        if i == 0:
            net.f(NumpyData("dummy_layer",
                            np.zeros((net_config["batch_size"],
                                      net_config["lstm_num_cells"]))))
            net.f(NumpyData("dummy_mem_cell",
                            np.zeros((net_config["batch_size"],
                                      net_config["lstm_num_cells"]))))
        for j in range(net_config['lstm_num_stacks']):
            bottoms = []
            if j == 0:
                bottoms.append('target_wordvec%d' % i)
            if j >= 1:
                bottoms.append('dropout%d_%d' % (j - 1, i))
            if i == 0:
                bottoms.append("dummy_layer")
            else:
                bottoms.append('lstm%d_hidden%d' % (j, i - 1))
            net.f(Concat('concat%d_layer%d' % (j, i), bottoms=bottoms))
            param_names = []
            for k in range(4):
                param_names.append('lstm%d_param_%d' % (j, k))
            bottoms = ['concat%d_layer%d' % (j, i)]
            if i == 0:
                bottoms.append('dummy_mem_cell')
            else:
                bottoms.append('lstm%d_mem_cell%d' % (j, i - 1))
            net.f(LstmUnit('lstm%d_layer%d' % (j, i),
                           net_config["lstm_num_cells"],
                           weight_filler=filler,
                           param_names=param_names,
                           bottoms=bottoms,
                           tops=['lstm%d_hidden%d' % (j, i),
                                 'lstm%d_mem_cell%d' % (j, i)]))
            net.f(Dropout('dropout%d_%d' % (j, i),
                          net_config["dropout_ratio"],
                          bottoms=['lstm%d_hidden%d' % (j, i)]))
        net.f(InnerProduct("ip%d" % i, net_config['vocab_size'],
                           bottoms=['dropout%d_%d' %
                                    (net_config['lstm_num_stacks'] - 1, i)],
                           weight_filler=filler))
        if i < net_config['max_len'] - 1:
            tar_wordvec = np.array(wordvec_layer[:, :, i + 1, 0])  # 128*38
            if phase == 'test':
                # At test time, feed the one-hot argmax prediction back as the
                # next input for sequences that have started and not yet stopped.
                net.f(Softmax("word_probs%d" % i, bottoms=["ip%d" % i]))
                probs = net.blobs["word_probs%d" % i].data
                for bi in range(net_config['batch_size']):
                    if i >= batch_ws_i[bi] and i < batch_stop_i[bi]:
                        vec = [0] * net_config["vocab_size"]
                        peakIndex = np.argmax(probs[bi, :])
                        if peakIndex == net_config['whitespace_symbol']:
                            batch_stop_i[bi] = i + 1
                        vec[peakIndex] = 1
                        tar_wordvec[bi, :] = vec
            net.f(NumpyData("target_wordvec%d" % (i + 1), data=tar_wordvec))

    bottoms = []
    for i in range(net_config['max_len']):
        bottoms.append("ip%d" % i)
    net.f(Concat('ip_concat', bottoms=bottoms, concat_dim=0))
    bottoms = []
    for i in range(net_config['max_len']):
        bottoms.append('label%d' % i)
    net.f(Concat('label_concat', bottoms=bottoms, concat_dim=0))
    if deploy:
        net.f(Softmax("word_probs", bottoms=["ip_concat"]))
    net.f(SoftmaxWithLoss("word_loss",
                          bottoms=["ip_concat", "label_concat"],
                          ignore_label=net_config['zero_symbol']))

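# A hedged training-call sketch for the forward pass above. Shapes follow the
# inline comments (batch 128, vocab 38, max_len 100); every config value and
# array below is a placeholder, not a value from the original code.
def demo_word_lstm_training_step():
    import numpy as np
    import apollocaffe
    net_config = {"max_len": 100, "batch_size": 128, "vocab_size": 38,
                  "lstm_num_cells": 250, "lstm_num_stacks": 2,
                  "dropout_ratio": 0.15, "init_range": 0.1,
                  "zero_symbol": 0, "whitespace_symbol": 1}
    input_data = {
        # one-hot word vectors: batch x vocab x time x 1
        "wordvec_layer": np.zeros((128, 38, 100, 1), dtype=np.float32),
        # target word indices: batch x time x 1 x 1
        "target_words": np.zeros((128, 100, 1, 1), dtype=np.float32),
        # per-example step at which test-time feedback would start
        "ws_i": [0] * 128,
    }
    net = apollocaffe.ApolloNet()
    forward(net, input_data, net_config, phase='train')
    net.backward()
    net.update(lr=0.1)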