def main(dataset='quora', width=50, depth=2, min_batch_size=1,
         max_batch_size=512, dropout=0.2, dropout_decay=0.0, pooling="mean+max",
         nb_epoch=5, pieces=3, L2=0.0, use_gpu=False, out_loc=None, quiet=False,
         job_id=None, ws_api_url=None, rest_api_url=None):
    global CTX
    if job_id is not None:
        CTX = neptune.Context()
        width = CTX.params.width
        L2 = CTX.params.L2
        nb_epoch = CTX.params.nb_epoch
        depth = CTX.params.depth
        max_batch_size = CTX.params.max_batch_size
    cfg = dict(locals())

    if out_loc:
        out_loc = Path(out_loc)
        if not out_loc.parent.exists():
            raise IOError("Can't open output location: %s" % out_loc)
    print(cfg)

    if pooling == 'mean+max':
        pool_layer = Pooling(mean_pool, max_pool)
    elif pooling == "mean":
        pool_layer = mean_pool
    elif pooling == "max":
        pool_layer = max_pool
    else:
        raise ValueError("Unrecognised pooling", pooling)

    print("Load spaCy")
    nlp = get_spacy('en')

    if use_gpu:
        Model.ops = CupyOps()

    print("Construct model")
    # Bind operators for the scope of the block:
    # * chain (>>): Compose models in a 'feed forward' style,
    #   i.e. chain(f, g)(x) -> g(f(x))
    # * clone (**): Create n copies of a model, and chain them, i.e.
    #   (f ** 3)(x) -> f''(f'(f(x))), where f, f' and f'' have distinct weights.
    # * concatenate (|): Merge the outputs of two models into a single vector,
    #   i.e. (f|g)(x) -> hstack(f(x), g(x))
    Model.lsuv = True
    #Model.ops = CupyOps()
    with Model.define_operators({'>>': chain, '**': clone, '|': concatenate,
                                 '+': add}):
        mwe_encode = ExtractWindow(nW=1) >> BN(
            Maxout(width, drop_factor=0.0, pieces=pieces))

        sent2vec = (  # List[spacy.token.Doc]{B}
            flatten_add_lengths  # : (ids{T}, lengths{B})
            >> with_getitem(0,
                #(StaticVectors('en', width)
                HashEmbed(width, 3000)
                #+ HashEmbed(width, 3000))
                #>> Residual(mwe_encode ** 2)
            )  # : word_ids{T}
            >> Pooling(mean_pool, max_pool)
            #>> Residual(BN(Maxout(width*2, pieces=pieces), nO=width*2)**2)
            >> Maxout(width * 2, pieces=pieces, drop_factor=0.0)
            >> logistic
        )
        model = Siamese(sent2vec, CauchySimilarity(width * 2))

    print("Read and parse data: %s" % dataset)
    if dataset == 'quora':
        train, dev = datasets.quora_questions()
    elif dataset == 'snli':
        train, dev = datasets.snli()
    elif dataset == 'stackxc':
        train, dev = datasets.stack_exchange()
    elif dataset in ('quora+snli', 'snli+quora'):
        train, dev = datasets.quora_questions()
        train2, dev2 = datasets.snli()
        train.extend(train2)
        dev.extend(dev2)
    else:
        raise ValueError("Unknown dataset: %s" % dataset)
    get_ids = get_word_ids(Model.ops)
    train_X, train_y = preprocess(model.ops, nlp, train, get_ids)
    dev_X, dev_y = preprocess(model.ops, nlp, dev, get_ids)

    with model.begin_training(train_X[:10000], train_y[:10000], **cfg) as (trainer, optimizer):
        # Pass a callback to print progress. Give it all the local scope,
        # because why not?
        trainer.each_epoch.append(track_progress(**locals()))
        trainer.batch_size = min_batch_size
        batch_size = float(min_batch_size)
        print("Accuracy before training", model.evaluate_logloss(dev_X, dev_y))
        print("Train")
        global epoch_train_acc
        n_iter = 0
        for X, y in trainer.iterate(train_X, train_y, progress_bar=not quiet):
            # Slightly useful trick: Decay the dropout as training proceeds.
            yh, backprop = model.begin_update(X, drop=trainer.dropout)
            assert yh.shape == y.shape, (yh.shape, y.shape)
            assert (yh >= 0.).all(), yh
            train_acc = ((yh >= 0.5) == (y >= 0.5)).sum()
            loss = model.ops.xp.abs(yh - y).mean()
            epoch_train_acc += train_acc
            backprop(yh - y, optimizer)
            n_iter += 1
            # Slightly useful trick: start with low batch size, accelerate.
            trainer.batch_size = min(int(batch_size), max_batch_size)
            batch_size *= 1.001
    if out_loc:
        out_loc = Path(out_loc)
        print('Saving to', out_loc)
        with out_loc.open('wb') as file_:
            pickle.dump(model, file_, -1)
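
# `track_progress` is defined elsewhere in this module; the sketch below is an
# assumption about its shape, based only on how main() uses it: it receives the
# local scope as keyword arguments and returns a zero-argument callback that the
# trainer runs after each epoch, reporting dev accuracy and resetting the global
# `epoch_train_acc` counter. Names prefixed with `_sketch` are illustrative.
def _track_progress_sketch(**context):
    model = context["model"]
    dev_X = context["dev_X"]
    dev_y = context["dev_y"]
    n_train = len(context["train_y"])
    trainer = context["trainer"]

    def each_epoch():
        global epoch_train_acc
        acc = model.evaluate_logloss(dev_X, dev_y)
        print("%.3f dev, %.3f train, dropout=%.3f"
              % (acc, epoch_train_acc / n_train, trainer.dropout))
        epoch_train_acc = 0.0

    return each_epoch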
def main( dataset="quora", width=200, depth=2, min_batch_size=1, max_batch_size=512, dropout=0.2, dropout_decay=0.0, pooling="mean+max", nb_epoch=5, pieces=3, L2=0.0, use_gpu=False, out_loc=None, quiet=False, job_id=None, ws_api_url=None, rest_api_url=None, ): cfg = dict(locals()) if out_loc: out_loc = Path(out_loc) if not out_loc.parent.exists(): raise IOError("Can't open output location: %s" % out_loc) print(cfg) if pooling == "mean+max": pool_layer = Pooling(mean_pool, max_pool) elif pooling == "mean": pool_layer = mean_pool elif pooling == "max": pool_layer = max_pool else: raise ValueError("Unrecognised pooling", pooling) print("Load spaCy") nlp = get_spacy("en") if use_gpu: Model.ops = CupyOps() print("Construct model") # Bind operators for the scope of the block: # * chain (>>): Compose models in a 'feed forward' style, # i.e. chain(f, g)(x) -> g(f(x)) # * clone (**): Create n copies of a model, and chain them, i.e. # (f ** 3)(x) -> f''(f'(f(x))), where f, f' and f'' have distinct weights. # * concatenate (|): Merge the outputs of two models into a single vector, # i.e. (f|g)(x) -> hstack(f(x), g(x)) Model.lsuv = True # Model.ops = CupyOps() with Model.define_operators({">>": chain, "**": clone, "|": concatenate, "+": add}): mwe_encode = ExtractWindow(nW=1) >> LN( Maxout(width, drop_factor=0.0, pieces=pieces) ) sent2vec = ( flatten_add_lengths >> with_getitem( 0, (HashEmbed(width, 3000) | StaticVectors("en", width)) >> LN(Maxout(width, width * 2)) >> Residual(mwe_encode) ** depth, ) # : word_ids{T} >> Pooling(mean_pool, max_pool) >> Residual(LN(Maxout(width * 2, pieces=pieces), nO=width * 2)) ** 2 >> logistic ) model = Siamese(sent2vec, CauchySimilarity(width * 2)) print("Read and parse data: %s" % dataset) if dataset == "quora": train, dev = datasets.quora_questions() elif dataset == "snli": train, dev = datasets.snli() elif dataset == "stackxc": train, dev = datasets.stack_exchange() elif dataset in ("quora+snli", "snli+quora"): train, dev = datasets.quora_questions() train2, dev2 = datasets.snli() train.extend(train2) dev.extend(dev2) else: raise ValueError("Unknown dataset: %s" % dataset) get_ids = get_word_ids(Model.ops) train_X, train_y = preprocess(model.ops, nlp, train, get_ids) dev_X, dev_y = preprocess(model.ops, nlp, dev, get_ids) with model.begin_training(train_X[:10000], train_y[:10000], **cfg) as ( trainer, optimizer, ): # Pass a callback to print progress. Give it all the local scope, # because why not? trainer.each_epoch.append(track_progress(**locals())) trainer.batch_size = min_batch_size batch_size = float(min_batch_size) print("Accuracy before training", model.evaluate_logloss(dev_X, dev_y)) print("Train") global epoch_train_acc n_iter = 0 for X, y in trainer.iterate(train_X, train_y, progress_bar=not quiet): # Slightly useful trick: Decay the dropout as training proceeds. yh, backprop = model.begin_update(X, drop=trainer.dropout) assert yh.shape == y.shape, (yh.shape, y.shape) assert (yh >= 0.0).all(), yh train_acc = ((yh >= 0.5) == (y >= 0.5)).sum() loss = model.ops.xp.abs(yh - y).mean() epoch_train_acc += train_acc backprop(yh - y, optimizer) n_iter += 1 # Slightly useful trick: start with low batch size, accelerate. trainer.batch_size = min(int(batch_size), max_batch_size) batch_size *= 1.001 if out_loc: out_loc = Path(out_loc) print("Saving to", out_loc) with out_loc.open("wb") as file_: pickle.dump(model, file_, -1)
def main( dataset="quora", width=64, depth=2, min_batch_size=1, max_batch_size=128, dropout=0.0, dropout_decay=0.0, pooling="mean+max", nb_epoch=20, pieces=3, use_gpu=False, out_loc=None, quiet=False, ): cfg = dict(locals()) if out_loc: out_loc = Path(out_loc) if not out_loc.parent.exists(): raise IOError("Can't open output location: %s" % out_loc) print(cfg) if pooling == "mean+max": pool_layer = Pooling(mean_pool, max_pool) elif pooling == "mean": pool_layer = mean_pool elif pooling == "max": pool_layer = max_pool else: raise ValueError("Unrecognised pooling", pooling) print("Load spaCy") nlp = get_spacy("en") # if use_gpu: # Model.ops = CupyOps() print("Construct model") # Bind operators for the scope of the block: # * chain (>>): Compose models in a 'feed forward' style, # i.e. chain(f, g)(x) -> g(f(x)) # * clone (**): Create n copies of a model, and chain them, i.e. # (f ** 3)(x) -> f''(f'(f(x))), where f, f' and f'' have distinct weights. # * concatenate (|): Merge the outputs of two models into a single vector, # i.e. (f|g)(x) -> hstack(f(x), g(x)) with Model.define_operators({">>": chain, "**": clone, "|": concatenate, "+": add}): mwe_encode = ExtractWindow(nW=1) >> Maxout(width, width * 3, pieces=pieces) embed = StaticVectors("en", width) # + Embed(width, width*2, 5000) # Comments indicate the output type and shape at each step of the pipeline. # * B: Number of sentences in the batch # * T: Total number of words in the batch # (i.e. sum(len(sent) for sent in batch)) # * W: Width of the network (input hyper-parameter) # * ids: ID for each word (integers). # * lengths: Number of words in each sentence in the batch (integers) # * floats: Standard dense vector. # (Dimensions annotated in curly braces.) sent2vec = ( # List[spacy.token.Doc]{B} flatten_add_lengths # : (ids{T}, lengths{B}) >> with_getitem( 0, embed >> mwe_encode ** depth # : word_ids{T} ) # : (floats{T, W}, lengths{B}) >> pool_layer >> Maxout(width, pieces=pieces) >> Maxout(width, pieces=pieces) ) model = ( ((Arg(0) >> sent2vec) | (Arg(1) >> sent2vec)) >> Maxout(width, pieces=pieces) >> Maxout(width, pieces=pieces) >> Softmax(2) ) print("Read and parse data: %s" % dataset) if dataset == "quora": train, dev = datasets.quora_questions() elif dataset == "snli": train, dev = datasets.snli() elif dataset == "stackxc": train, dev = datasets.stack_exchange() elif dataset in ("quora+snli", "snli+quora"): train, dev = datasets.quora_questions() train2, dev2 = datasets.snli() train.extend(train2) dev.extend(dev2) else: raise ValueError("Unknown dataset: %s" % dataset) get_ids = get_word_ids(Model.ops) train_X, train_y = preprocess(model.ops, nlp, train, get_ids) dev_X, dev_y = preprocess(model.ops, nlp, dev, get_ids) print("Initialize with data (LSUV)") print(dev_y.shape) with model.begin_training(train_X[:5000], train_y[:5000], **cfg) as ( trainer, optimizer, ): # Pass a callback to print progress. Give it all the local scope, # because why not? trainer.each_epoch.append(track_progress(**locals())) trainer.batch_size = min_batch_size batch_size = float(min_batch_size) print("Accuracy before training", model.evaluate(dev_X, dev_y)) print("Train") global epoch_train_acc for X, y in trainer.iterate(train_X, train_y, progress_bar=not quiet): # Slightly useful trick: Decay the dropout as training proceeds. yh, backprop = model.begin_update(X, drop=trainer.dropout) assert yh.shape == y.shape, (yh.shape, y.shape) # No auto-diff: Just get a callback and pass the data through. 
# Hardly a hardship, and it means we don't have to create/maintain # a computational graph. We just use closures. assert (yh >= 0.0).all() train_acc = (yh.argmax(axis=1) == y.argmax(axis=1)).sum() epoch_train_acc += train_acc backprop(yh - y, optimizer) # Slightly useful trick: start with low batch size, accelerate. trainer.batch_size = min(int(batch_size), max_batch_size) batch_size *= 1.001 if out_loc: out_loc = Path(out_loc) print("Saving to", out_loc) with out_loc.open("wb") as file_: pickle.dump(model, file_, -1)
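
# The "start with low batch size, accelerate" trick used in the training loops
# above, pulled out as a standalone sketch: grow the batch size by 0.1% per
# update, capped at max_size, so early updates are cheap and noisy while later
# ones are smoother. The generator name is illustrative, not part of the
# original module.
def _compounding_batch_size(min_size=1, max_size=128, factor=1.001):
    size = float(min_size)
    while True:
        yield min(int(size), max_size)
        size *= factor

# Usage: sizes = _compounding_batch_size(); next(sizes) -> 1, 1, ... then 2, 3, ...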
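
# Example scripts of this vintage are typically run from the command line; an
# entry point along these lines is assumed (plac maps main()'s keyword arguments
# to CLI flags), though the original file may wire this up differently.
if __name__ == "__main__":
    import plac

    plac.call(main)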