def run(train, test):
    init = Gaussian(scale=0.01)
    layers = [Conv((3, 3, 128), init=init, activation=Rectlin(),
                   strides=dict(str_h=1, str_w=2)),
              Conv((3, 3, 256), init=init, batch_norm=True, activation=Rectlin()),
              Pooling(2, strides=2),
              Conv((2, 2, 512), init=init, batch_norm=True, activation=Rectlin()),
              DeepBiRNN(256, init=init, activation=Rectlin(), reset_cells=True, depth=3),
              RecurrentLast(),
              Affine(32, init=init, batch_norm=True, activation=Rectlin()),
              Affine(nout=common['nclasses'], init=init, activation=Softmax())]

    model = Model(layers=layers)
    opt = Adadelta()
    metric = Misclassification()
    callbacks = Callbacks(model, eval_set=test, metric=metric, **args.callback_args)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())

    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost,
              callbacks=callbacks)
    return model
def create_network():
    init = GlorotUniform()
    layers = [Conv((3, 3, 128), init=init, activation=Rectlin(),
                   strides=dict(str_h=1, str_w=2)),
              Conv((3, 3, 256), init=init, batch_norm=True, activation=Rectlin()),
              Pooling(2, strides=2),
              Conv((2, 2, 512), init=init, batch_norm=True, activation=Rectlin()),
              DeepBiRNN(256, init=init, activation=Rectlin(), reset_cells=True, depth=3),
              RecurrentLast(),
              Affine(32, init=init, batch_norm=True, activation=Rectlin()),
              Affine(nout=2, init=init, activation=Softmax())]

    return Model(layers=layers), GeneralizedCost(costfunc=CrossEntropyBinary())
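# A minimal sketch of wiring create_network() into a training run; `train`,
# `test`, and the parsed `args` are assumed to exist exactly as in run() above,
# and this helper is illustrative, not part of the original script.
def train_example(train, test, args):
    model, cost = create_network()
    callbacks = Callbacks(model, eval_set=test, metric=Misclassification(),
                          **args.callback_args)
    model.fit(train, optimizer=Adadelta(), num_epochs=args.epochs, cost=cost,
              callbacks=callbacks)
    return model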
if return_sequences:  # assumed flag: the leading condition was missing from this snippet
    # Emit a prediction at every time step
    layers = [
        LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(),
             reset_cells=False),
        Affine(train_set.nfeatures, init, bias=init, activation=Identity())
    ]
else:
    # Emit a single prediction from the last time step only
    layers = [
        LSTM(hidden, init, activation=Logistic(), gate_activation=Tanh(),
             reset_cells=True),
        RecurrentLast(),
        Affine(train_set.nfeatures, init, bias=init, activation=Identity())
    ]

model = Model(layers=layers)
cost = GeneralizedCost(MeanSquared())
optimizer = RMSProp(stochastic_round=args.rounding)
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

# fit model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
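# After fitting, predictions come back via Model.get_outputs(); a minimal sketch
# assuming the non-sequence branch above (one prediction per input window).
# The helper name is illustrative, not from the original script.
def get_predictions(model, data_set, nfeatures):
    # get_outputs returns one row per example; reshape to (samples, nfeatures)
    return model.get_outputs(data_set).reshape(-1, nfeatures)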
def load_sent_encoder(model_dict, expand_vocab=False, orig_vocab=None,
                      w2v_vocab=None, w2v_path=None, use_recur_last=False):
    """
    Custom function to load the model saved from skip-thought vector training
    and reconstruct another model using just the LUT and encoding layer for
    transferring sentence representations.

    Arguments:
        model_dict: saved s2v model dict
        expand_vocab: Bool to indicate if w2v vocab expansion should be attempted
        orig_vocab: If using expand_vocab, original vocabulary dict is needed
                    for expansion
        w2v_vocab: If using expand_vocab, w2v vocab dict
        w2v_path: Path to trained w2v binary (GoogleNews)
        use_recur_last: If True, a RecurrentLast layer is used as the final
                        layer; if False, a RecurrentSum layer is used as the
                        last layer of the returned model.
    """
    embed_dim = model_dict['model']['config']['embed_dim']
    model_train = Model(model_dict)

    # RecurrentLast should be used for semantic similarity evaluation
    if use_recur_last:
        last_layer = RecurrentLast()
    else:
        last_layer = RecurrentSum()

    if expand_vocab:
        assert orig_vocab and w2v_vocab and w2v_path, \
            "All vocabs and w2v_path need to be specified when using expand_vocab"
        neon_logger.display("Computing vocab expansion regression...")

        # Build inverse word dictionary (index -> word)
        word_idict = dict()
        for kk, vv in orig_vocab.items():
            # Add 2 to the index to allow for padding and oov tokens as 0 and 1
            word_idict[vv + 2] = kk
        word_idict[0] = ''
        word_idict[1] = 'UNK'

        # Create dictionary of word -> vec
        orig_word_vecs = get_embeddings(model_train.layers.layer_dict['lookupTable'],
                                        word_idict)

        # Load GoogleNews w2v weights
        w2v_W, w2v_dim, _ = get_google_word2vec_W(w2v_path, w2v_vocab)

        # Compute the expanded vocab lookup table from a linear mapping of
        # word2vec into the RNN word space
        init_embed = compute_vocab_expansion(orig_word_vecs, w2v_W, w2v_vocab,
                                             word_idict)

        init_embed_dev = model_train.be.array(init_embed)
        w2v_vocab_size = len(w2v_vocab)
        table = LookupTable(vocab_size=w2v_vocab_size, embedding_dim=embed_dim,
                            init=init_embed_dev, pad_idx=0)

        model = Model(layers=[table,
                              model_train.layers.layer_dict['encoder'],
                              last_layer])
    else:
        model = Model(layers=[model_train.layers.layer_dict['lookupTable'],
                              model_train.layers.layer_dict['encoder'],
                              last_layer])

    return model
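# Minimal usage sketch for load_sent_encoder(); `model_path` and `sentence_set`
# are illustrative stand-ins, not names from the original script.
def encode_sentences(model_path, sentence_set):
    from neon.util.persist import load_obj  # neon's standard deserialization helper
    model_dict = load_obj(model_path)
    encoder = load_sent_encoder(model_dict)
    # One row per sentence: the RecurrentSum (or RecurrentLast) encoder output
    return encoder.get_outputs(sentence_set)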
def main():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)

    seq_len = 30
    hidden = 20

    be = gen_backend(**extract_valid_args(args, gen_backend))

    # `ts` must exist before the sequence iterators below can be built; this
    # restores the V1TimeSeries path that was left commented out mid-refactor.
    mat_data = sio.loadmat('../data/timeseries/02_timeseries.mat')
    ts = V1TimeSeries(mat_data['timeseries'], mat_data['stim'], binning=10)

    # Alternative dataset that was being wired in; its iterators are not used yet.
    # kohn = KohnV1Dataset(path='../tmp/')
    # kohn.gen_iterators(seq_len)

    train_set = V1IteratorSequence(ts.train, seq_len, return_sequences=False)
    valid_set = V1IteratorSequence(ts.test, seq_len, return_sequences=False)

    init = GlorotUniform()

    # Two-path network: one LSTM stack over the spike history, one over the
    # stimulus, merged and mapped back to the spike feature space.
    spike_rnn_path = Sequential(layers=[
        LSTM(hidden, init, activation=Logistic(), gate_activation=Logistic(),
             reset_cells=False),
        Dropout(keep=0.5),
        LSTM(hidden, init, activation=Logistic(), gate_activation=Logistic(),
             reset_cells=False),
        RecurrentLast(),
        Affine(train_set.nfeatures, init, bias=init, activation=Identity(),
               name='spike_in')])

    stim_rnn_path = Sequential(layers=[
        LSTM(hidden, init, activation=Logistic(), gate_activation=Logistic(),
             reset_cells=False),
        Dropout(keep=0.5),
        RecurrentLast(),
        Affine(1, init, bias=init, activation=Identity(), name='stim')])

    layers = [
        MergeMultistream(layers=[spike_rnn_path, stim_rnn_path], merge="stack"),
        Affine(train_set.nfeatures, init, bias=init, activation=Identity(),
               name='spike_out'),
        Round()  # custom rounding layer, assumed to be defined elsewhere in the project
    ]

    model = Model(layers=layers)

    sched = ExpSchedule(decay=0.7)
    cost = GeneralizedCost(MeanSquared())

    optimizer_two = RMSProp(stochastic_round=args.rounding)
    optimizer_one = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9,
                                            schedule=sched)
    opt = MultiOptimizer({'default': optimizer_one,
                          'Bias': optimizer_two,
                          'special_linear': optimizer_two})

    callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
    callbacks.add_hist_callback(filter_key=['W'])
    # Optional: per-epoch FEV metric on the validation set
    # callbacks.add_callback(MetricCallback(eval_set=valid_set,
    #                                       metric=FractionExplainedVariance(),
    #                                       epoch_freq=args.eval_freq))

    model.fit(train_set, optimizer=opt, num_epochs=args.epochs, cost=cost,
              callbacks=callbacks)

    train_output = model.get_outputs(train_set).reshape(-1, train_set.nfeatures)
    valid_output = model.get_outputs(valid_set).reshape(-1, valid_set.nfeatures)
    train_target = train_set.y_series
    valid_target = valid_set.y_series

    tfev = fev(train_output, train_target, train_set.mean)
    vfev = fev(valid_output, valid_target, valid_set.mean)

    neon_logger.display('Train FEV: %g, Valid FEV: %g' % (tfev, vfev))

    # Plot predictions against targets for the first two features
    plt.figure()
    plt.plot(train_output[:, 0], train_output[:, 1], 'bo', label='prediction')
    plt.plot(train_target[:, 0], train_target[:, 1], 'r.', label='target')
    plt.legend()
    plt.title('Neon on training set')
    plt.savefig('neon_series_training_output.png')

    plt.figure()
    plt.plot(valid_output[:, 0], valid_output[:, 1], 'bo', label='prediction')
    plt.plot(valid_target[:, 0], valid_target[:, 1], 'r.', label='target')
    plt.legend()
    plt.title('Neon on validation set')
    plt.savefig('neon_series_validation_output.png')
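# For reference, the usual definition of fraction of explained variance (FEV),
# which the fev() call above appears to compute: 1 minus the ratio of prediction
# MSE to the variance of the target about its mean. This is a sketch under that
# assumption, not the project's own implementation.
def fev_reference(output, target, mean):
    import numpy as np
    mse = np.mean((output - target) ** 2)   # prediction error
    var = np.mean((target - mean) ** 2)     # variance about the series mean
    return 1.0 - mse / var                  # 1.0 corresponds to a perfect prediction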