print len(alphabet)

# lists of characters:
# X = [list(x) for x in X]

# lists of words
# replace unknowns with an unknown token
tokenizer = WordPunctTokenizer()
new_X = []
for x in X:
    new_X.append(
        [w if w in alphabet else 'UNKNOWN' for w in tokenizer.tokenize(x)])
X = new_X

train_data_provider = LabelledSequenceMinibatchProvider(
    X=X[:-500],
    Y=Y[:-500],
    batch_size=100,
    padding='PADDING')

print train_data_provider.batches_per_epoch

n_validation = 500
validation_data_provider = LabelledSequenceMinibatchProvider(
    X=X[-n_validation:],
    Y=Y[-n_validation:],
    batch_size=n_validation,
    padding='PADDING')

# ~70% after 300 batches of 100, regularizer L2=1e-4 on tweets100k
#
# model = CSM(
#     layers=[
#         DictionaryEncoding(vocabulary=encoding),
#
#         WordEmbedding(  # really a character embedding
#             dimension=32,
#             vocabulary_size=len(encoding)),
#
#         SentenceConvolution(
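# A minimal usage sketch (not from the original script): how minibatches come
# out of the provider built above. It relies only on provider methods already
# used in this file -- next_batch() and batches_per_epoch.
for _ in xrange(train_data_provider.batches_per_epoch):
    X_batch, Y_batch, meta_batch = train_data_provider.next_batch()
    # X_batch: padded token sequences; Y_batch: one-hot label rows, so
    # np.argmax(Y_batch, axis=1) recovers the class index, as in the
    # monitoring code in optimize_and_save below.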
def optimize_and_save(model, alphabet, n_batches, data_file_name,
                      chars_or_words, result_file_name):
    print result_file_name

    with gzip.open(data_file_name) as data_file:
        data = json.loads(data_file.read())
        X, Y = map(list, zip(*data))

    # shuffle
    combined = zip(X, Y)
    random.shuffle(combined)
    X, Y = map(list, zip(*combined))

    # map labels to something useful
    Y = [[":)", ":("].index(y) for y in Y]

    if chars_or_words == 'chars':
        X = [list(x) for x in X]
    elif chars_or_words == 'words':
        # replace unknowns with an unknown token
        tokenizer = WordPunctTokenizer()
        new_X = []
        for x in X:
            new_X.append(
                [w if w in alphabet else 'UNKNOWN'
                 for w in tokenizer.tokenize(x)])
        X = new_X
    else:
        raise ValueError("I don't know what that means :(")

    # hold out the last 500 examples for validation
    train_data_provider = LabelledSequenceMinibatchProvider(
        X=X[:-500],
        Y=Y[:-500],
        batch_size=50,
        padding='PADDING')

    validation_data_provider = LabelledSequenceMinibatchProvider(
        X=X[-500:],
        Y=Y[-500:],
        batch_size=500,
        padding='PADDING')

    cost_function = CrossEntropy()

    objective = CostMinimizationObjective(
        cost=cost_function,
        data_provider=train_data_provider)

    update_rule = AdaGrad(
        gamma=0.05,
        model_template=model)

    regularizer = L2Regularizer(lamb=1e-4)

    optimizer = SGD(
        model=model,
        objective=objective,
        update_rule=update_rule,
        regularizer=regularizer)

    print model

    monitor_info = []
    iteration_info = []
    for batch_index, info in enumerate(optimizer):
        iteration_info.append(info)

        if batch_index % 10 == 0:
            X_valid, Y_valid, meta_valid = validation_data_provider.next_batch()
            Y_hat = model.fprop(X_valid, meta=meta_valid)

            # each row of Y_hat should be a probability distribution
            assert np.all(np.abs(Y_hat.sum(axis=1) - 1) < 1e-6)

            acc = np.mean(np.argmax(Y_hat, axis=1) == np.argmax(Y_valid, axis=1))
            # proportion of examples predicted as class 1
            prop_1 = np.argmax(Y_hat, axis=1).mean()

            monitor_info.append({
                'batch_index': batch_index,
                'acc': acc,
                'prop_1': prop_1,
            })

            print "B: {}, A: {}, C: {}, Prop1: {}, Param size: {}".format(
                batch_index, acc, info['cost'], prop_1,
                np.mean(np.abs(model.pack())))

        if batch_index == n_batches - 1:
            break

    result = {
        'model': model,
        'iteration_info': iteration_info,
        'monitor_info': monitor_info,
    }

    # protocol=-1 is a binary pickle, so open the file in binary mode
    with open(result_file_name, 'wb') as result_file:
        pickle.dump(result, result_file, protocol=-1)
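# A hypothetical invocation of optimize_and_save (a sketch, not from the
# original repo): build_model, load_alphabet, and both file names below are
# placeholders. The function only assumes data_file_name is a gzipped JSON
# list of (text, ":)" / ":(") pairs; n_batches=300 mirrors the "~70% after
# 300 batches" note above.
if __name__ == "__main__":
    model = build_model()        # placeholder: some txtnets CSM
    alphabet = load_alphabet()   # placeholder: set of known words/characters
    optimize_and_save(
        model=model,
        alphabet=alphabet,
        n_batches=300,
        data_file_name="tweets100k.json.gz",        # placeholder path
        chars_or_words='words',
        result_file_name="tweets100k.results.pkl")  # placeholder path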
def main():
    random.seed(34532)
    np.random.seed(675)
    np.set_printoptions(linewidth=100)

    data_dir = os.path.join(
        "/users/mdenil/code/txtnets/txtnets_deployed/data", "stanfordmovie")

    # train word2vec embeddings on the Stanford movie review sentences
    trainer = Word2Vec(
        train=os.path.join(
            data_dir, "stanfordmovie.train.sentences.clean.projected.txt"),
        output="stanford-movie-vectors.bin",
        cbow=1,
        size=300,
        window=8,
        negative=25,
        hs=0,
        sample=1e-4,
        threads=20,
        binary=1,
        iter=15,
        min_count=1)

    trainer.train()

    gensim_model = gensim.models.Word2Vec.load_word2vec_format(
        "/users/mdenil/code/txtnets/txtnets_deployed/code/stanford-movie-vectors.bin",
        binary=True)

    # print(gensim_model.most_similar(["refund"]))
    # print(gensim_model.most_similar(["amazing"]))

    embedding_model = txtnets_model_from_gensim_word2vec(gensim_model)

    with open(os.path.join(
            data_dir,
            "stanfordmovie.train.sentences.clean.projected.flat.json")) as data_file:
        data = json.load(data_file)

    random.shuffle(data)
    X, Y = map(list, zip(*data))
    Y = [[":)", ":("].index(y) for y in Y]

    batch_size = 100
    n_validation = 500

    train_data_provider = LabelledSequenceMinibatchProvider(
        X=X[:-n_validation],
        Y=Y[:-n_validation],
        batch_size=batch_size,
        padding='PADDING')

    # feed word vectors (instead of raw tokens) to the model
    transformed_train_data_provider = TransformedLabelledDataProvider(
        data_source=train_data_provider,
        transformer=embedding_model)

    validation_data_provider = LabelledSequenceMinibatchProvider(
        X=X[-n_validation:],
        Y=Y[-n_validation:],
        batch_size=batch_size,
        padding='PADDING')

    transformed_validation_data_provider = TransformedLabelledDataProvider(
        data_source=validation_data_provider,
        transformer=embedding_model)

    # sum the word vectors in each sentence, then apply a softmax:
    # logistic regression on a bag of word vectors
    logistic_regression = CSM(layers=[
        Sum(axes=['w']),
        Softmax(
            n_input_dimensions=gensim_model.syn0.shape[1],
            n_classes=2),
    ])

    cost_function = CrossEntropy()
    regularizer = L2Regularizer(lamb=1e-4)

    objective = CostMinimizationObjective(
        cost=cost_function,
        data_provider=transformed_train_data_provider,
        regularizer=regularizer)

    update_rule = AdaGrad(gamma=0.1, model_template=logistic_regression)

    optimizer = SGD(
        model=logistic_regression,
        objective=objective,
        update_rule=update_rule)

    for batch_index, iteration_info in enumerate(optimizer):
        if batch_index % 100 == 0:
            # evaluate accuracy on the full validation set
            Y_hat = []
            Y_valid = []
            for _ in xrange(transformed_validation_data_provider.batches_per_epoch):
                X_valid_batch, Y_valid_batch, meta_valid = \
                    transformed_validation_data_provider.next_batch()
                Y_valid.append(get(Y_valid_batch))
                Y_hat.append(get(logistic_regression.fprop(
                    X_valid_batch, meta=meta_valid)))
            Y_valid = np.concatenate(Y_valid, axis=0)
            Y_hat = np.concatenate(Y_hat, axis=0)

            acc = np.mean(np.argmax(Y_hat, axis=1) == np.argmax(Y_valid, axis=1))

            print("B: {}, A: {}, C: {}".format(
                batch_index, acc, iteration_info['cost']))

        # assumption: the original loop had no stopping condition, which made
        # the save below unreachable; cap the number of batches so it runs
        if batch_index == 10000:
            break

    # protocol=-1 is a binary pickle, so open the file in binary mode
    with open("model_w2vec_logreg.pkl", 'wb') as model_file:
        pickle.dump(embedding_model.move_to_cpu(), model_file, protocol=-1)
        pickle.dump(logistic_regression.move_to_cpu(), model_file, protocol=-1)
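# main() pickles two objects back-to-back into model_w2vec_logreg.pkl, so
# reading them back takes two pickle.load calls in the same order. A minimal
# loading sketch:
import pickle

with open("model_w2vec_logreg.pkl", 'rb') as model_file:
    embedding_model = pickle.load(model_file)        # dumped first
    logistic_regression = pickle.load(model_file)    # dumped second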