def train_translation_matrix(source_file, target_file, dict_file, out_file):
    """Trains a translation matrix between the source and target languages,
    using the words in dict_file as anchor points and writing the translation
    matrix to out_file.

    Note that the source language file and target language file must be in the
    word2vec C ASCII format.

    :param source_file: The name of the source language file
    :param target_file: The name of the target language file
    :param dict_file: The name of the file with the bilingual dictionary
    :param out_file: The name of the file to write the translation matrix to
    """
    log.info("Reading the training data")
    train_data = read_dict(dict_file)

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words, target_words = zip(*train_data)

    log.info("Reading: %s" % source_file)
    source_sp = Space.build(source_file, set(source_words))
    source_sp.normalize()

    log.info("Reading: %s" % target_file)
    target_sp = Space.build(target_file, set(target_words))
    target_sp.normalize()

    log.debug('Words in the source space: %s' % source_sp.row2id)
    log.debug('Words in the target space: %s' % target_sp.row2id)

    log.info("Learning the translation matrix")
    log.info("Training data: %s" % str(train_data))
    tm = train_tm(source_sp, target_sp, train_data)
    log.info("Printing the translation matrix")
    np.savetxt(out_file, tm)
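# --- Hedged sketch (not part of the original code) ---------------------------
# train_tm is called above but not defined in this snippet. A minimal sketch of
# what it could look like, assuming the usual least-squares mapping: find W so
# that X @ W ~ Z for the seed pairs, with X/Z rows taken from the normalized
# source/target spaces. The name, signature and solver choice are assumptions.
import numpy as np

def train_tm_sketch(source_sp, target_sp, train_data):
    # keep only seed pairs covered by both spaces
    pairs = [(s, t) for s, t in train_data
             if s in source_sp.row2id and t in target_sp.row2id]
    X = np.vstack([source_sp.mat[source_sp.row2id[s]] for s, _ in pairs])
    Z = np.vstack([target_sp.mat[target_sp.row2id[t]] for _, t in pairs])
    # least-squares solution of X @ W = Z
    W, _, _, _ = np.linalg.lstsq(X, Z, rcond=None)
    return W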
def train_wrapper(seed_fn, source_fn, target_fn, reverse=False, mx_path=None,
                  train_size=5000):
    logging.info("Training...")
    seed_trans = read_dict(seed_fn, reverse=reverse)

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words = set(seed_trans.iterkeys())
    target_words = set().union(*seed_trans.itervalues())

    source_sp = Space.build(source_fn, lexicon=source_words)
    source_sp.normalize()

    target_sp = Space.build(target_fn, lexicon=target_words)
    target_sp.normalize()

    logging.info("Learning the translation matrix")
    tm, used_for_train = train_tm(source_sp, target_sp, seed_trans, train_size)
    mx_path = default_output_fn(mx_path, seed_fn, source_fn, target_fn)
    logging.info("Saving the translation matrix to {}".format(mx_path))
    np.save('{}.npy'.format(mx_path), tm)
    pickle.dump(used_for_train, open('{}.train_wds'.format(mx_path), mode='w'))
    return tm, used_for_train
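# --- Hedged sketch (not part of the original code) ---------------------------
# default_output_fn is used above to pick a path for the saved matrix when none
# is given. A minimal sketch under the assumption that the path is derived from
# the seed and embedding file names; the exact naming scheme is a guess.
import os

def default_output_fn_sketch(mx_path, seed_fn, source_fn, target_fn):
    if mx_path:
        return mx_path
    base = lambda p: os.path.splitext(os.path.basename(p))[0]
    return '{}__{}__{}'.format(base(seed_fn), base(source_fn), base(target_fn))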
def build_src_wrapper(self, source_file, test_wpairs):
    """
    In the _source_ space, we only need to load vectors for the words in test.
    Semantic spaces may contain additional words.
    All words in the _target_ space are used as the search space
    """
    source_words = set(test_wpairs.iterkeys())
    if self.additional:
        # read all the words in the space
        lexicon = set(np.loadtxt(source_file, skiprows=1, dtype=str,
                                 comments=None, usecols=(0,)).flatten())
        # the max number of additional+test elements is bounded by the size
        # of the lexicon
        self.additional = min(self.additional,
                              len(lexicon) - len(source_words))
        random.seed(100)
        logging.info("Sampling {} additional elements".format(self.additional))
        # additional lexicon:
        lexicon = random.sample(list(lexicon.difference(source_words)),
                                self.additional)
        # load the source space
        source_sp = Space.build(source_file,
                                lexicon=source_words.union(set(lexicon)),
                                max_rows=1000)
    else:
        source_sp = Space.build(source_file, lexicon=source_words,
                                max_rows=1000)
    source_sp.normalize()
    return source_sp
def test_wrapper(self):
    self.load_tr_mx()
    logging.info('The denominator of precision {} OOV words'.format(
        'includes' if self.args.coverage else "doesn't include"))
    test_wpairs = read_dict(self.args.seed_fn, reverse=self.args.reverse,
                            needed=1000 if self.args.coverage else -1,
                            exclude=self.exclude_from_test)

    source_sp = self.build_src_wrapper(self.args.source_fn, test_wpairs)

    target_sp = Space.build(self.args.target_fn)
    target_sp.normalize()

    test_wpairs, _ = get_invocab_trans(source_sp, target_sp, test_wpairs,
                                       needed=1000)
    """
    # turn test data into a dictionary (a word can have multiple translations)
    gold = collections.defaultdict(set, test_wpairs)
    for sr, tg in test_wpairs:
        gold[sr].add(tg)
    """

    logging.info("Mapping all the elements loaded in the source space")
    mapped_source_sp = apply_tm(source_sp, self.tr_mx)

    if hasattr(self.args, 'mapped_vecs') and self.args.mapped_vecs:
        logging.info("Printing mapped vectors: %s" % self.args.mapped_vecs)
        np.savetxt("%s.vecs.txt" % self.args.mapped_vecs, mapped_source_sp.mat)
        np.savetxt("%s.wds.txt" % self.args.mapped_vecs,
                   mapped_source_sp.id2word, fmt="%s")

    return score(mapped_source_sp, target_sp, test_wpairs, self.additional)
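# --- Hedged sketch (not part of the original code) ---------------------------
# apply_tm maps every loaded source vector through the learned matrix. A
# minimal sketch, assuming a Space(mat, id2row)-style constructor and that the
# matrix was trained so that a source row maps directly through tm into the
# target space; both the constructor signature and the orientation of tm are
# assumptions.
def apply_tm_sketch(sp, tm):
    mapped = sp.mat.dot(tm)                   # assumed orientation of tm
    new_sp = Space(mapped, list(sp.id2row))   # hypothetical constructor signature
    new_sp.normalize()                        # re-normalize so dot products stay cosines
    return new_sp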
def build(cls, core_space, **kwargs):
    """
    Reads in data files and extracts the data to construct a semantic space.

    If the data is read in dense format and no columns are provided, the
    column indexing structures are set to empty.

    Args:
        data: file containing the counts
        format: format of the input data file: one of sm/dm
        rows: file containing the row elements. Optional, if not provided,
            extracted from the data file.
        cols: file containing the column elements

    Returns:
        A semantic space built from the input data files.

    Raises:
        ValueError: if one of the data/format arguments is missing,
            if cols is missing and format is "sm",
            or if the input columns provided are not consistent with the
            shape of the matrix (for "dm" format).
    """
    sp = Space.build(**kwargs)
    mat = sp._cooccurrence_matrix
    id2row = sp.id2row
    row2id = sp.row2id
    return PeripheralSpace(core_space, mat, id2row, row2id)
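# --- Hedged sketch (not part of the original code) ---------------------------
# Space.build is used throughout but not shown here. A minimal sketch of a
# Space-like class reading word2vec C text format ("<rows> <dims>" header,
# then "word v1 v2 ..." per line) and keeping only `lexicon` words when a
# lexicon is given. The class name, attributes and normalization mirror how
# the snippets above use them, but this is an illustrative reconstruction, not
# the project's implementation.
import numpy as np

class SpaceSketch(object):
    def __init__(self, mat, id2row):
        self.mat = mat
        self.id2row = id2row
        self.row2id = {w: i for i, w in enumerate(id2row)}

    def normalize(self):
        # L2-normalize rows so dot products are cosine similarities
        norms = np.sqrt((self.mat ** 2).sum(axis=1))
        norms[norms == 0] = 1.0
        self.mat = self.mat / norms[:, None]

    @classmethod
    def build(cls, fname, lexicon=None):
        id2row, rows = [], []
        with open(fname) as f:
            next(f)  # skip the "<rows> <dims>" header line
            for line in f:
                parts = line.rstrip().split(' ')
                word = parts[0]
                if lexicon is not None and word not in lexicon:
                    continue
                id2row.append(word)
                rows.append([float(x) for x in parts[1:]])
        return cls(np.array(rows), id2row)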
def main(sys_argv):
    try:
        opts, argv = getopt.getopt(sys_argv[1:], "ho:", ["help", "output="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(1)

    out_file = "./tm"
    for opt, val in opts:
        if opt in ("-o", "--output"):
            out_file = val
        elif opt in ("-h", "--help"):
            usage(0)
        else:
            usage(1)

    if len(argv) == 3:
        source_file = argv[1]
        target_file = argv[2]
        dict_file = argv[0]
    else:
        # wrong number of positional arguments
        usage(1)

    print("Reading the training data")
    train_data = read_dict(dict_file)

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words, target_words = zip(*train_data)

    print("Reading: %s" % source_file)
    source_sp = Space.build(source_file, set(source_words))
    source_sp.normalize()

    print("Reading: %s" % target_file)
    target_sp = Space.build(target_file, set(target_words))
    target_sp.normalize()

    print("Learning the translation matrix")
    tm = train_tm(source_sp, target_sp, train_data)
    print("Printing the translation matrix")
    np.savetxt("%s.txt" % out_file, tm)
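# --- Hedged sketch (not part of the original code) ---------------------------
# read_dict is referenced in several places but not defined in this snippet.
# A minimal sketch under the assumption that the bilingual dictionary is a
# plain-text file with one tab-separated "source<TAB>target" pair per line.
# Some callers above expect a list of pairs, others a word-to-translations
# mapping; this shows only the list-of-pairs variant.
def read_dict_sketch(dict_file):
    pairs = []
    with open(dict_file) as f:
        for line in f:
            parts = line.rstrip('\n').split('\t')
            if len(parts) >= 2:
                pairs.append((parts[0], parts[1]))
    return pairs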
        test_file = argv[3]
        model = eval(argv[4])
        dict_file = argv[0]
    else:
        # wrong number of positional arguments
        usage(1)

    print "Reading the training data"
    train_data = read_dict(dict_file)
    print train_data

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words, target_words = zip(*train_data)

    print "Reading: %s" % source_file
    source_sp = Space.build(source_file, set(source_words))
    source_sp.normalize()

    print "Reading: %s" % target_file
    target_sp = Space.build(target_file, set(target_words))
    target_sp.normalize()

    print "Learning the translation matrix"
    tm = train_tm_model(source_sp, target_sp, train_data, model)

    # print "Printing the translation matrix"
    # np.savetxt("%s.txt" % out_file, tm)

    print "Reading the test data"
    test_data = read_dict(test_file)
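# --- Hedged sketch (not part of the original code) ---------------------------
# train_tm_model is called above with a `model` object built by eval() from the
# command line. A minimal sketch under the assumption that `model` is a
# scikit-learn-style regressor (fit/predict) trained to map seed source vectors
# onto their target vectors; the name and signature are assumptions.
import numpy as np

def train_tm_model_sketch(source_sp, target_sp, train_data, model):
    pairs = [(s, t) for s, t in train_data
             if s in source_sp.row2id and t in target_sp.row2id]
    X = np.vstack([source_sp.mat[source_sp.row2id[s]] for s, _ in pairs])
    Z = np.vstack([target_sp.mat[target_sp.row2id[t]] for _, t in pairs])
    model.fit(X, Z)
    return model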
    print "Loading the translation matrix"
    tm = np.loadtxt(tm_file)

    print "Reading the test data"
    test_data = read_dict(test_file)

    # in the _source_ space, we only need to load vectors for the words in test.
    # semantic spaces may contain additional words, ALL words in the _target_
    # space are used as the search space
    source_words, _ = zip(*test_data)
    source_words = set(source_words)

    print "Reading: %s" % source_file
    if not additional:
        source_sp = Space.build(source_file, source_words)
    else:
        # read all the words in the space
        lexicon = set(np.loadtxt(source_file, skiprows=1, dtype=str,
                                 comments=None, usecols=(0,)).flatten())
        # the max number of additional+test elements is bounded by the size
        # of the lexicon
        additional = min(additional, len(lexicon) - len(source_words))
        # we sample additional elements that are not already in source_words
        random.seed(100)
        lexicon = random.sample(list(lexicon.difference(source_words)),
                                additional)
        # load the source space
        source_sp = Space.build(source_file,
                                source_words.union(set(lexicon)))
    source_sp.normalize()
        source_file = argv[1]
        target_file = argv[2]
        dict_file = argv[0]
    else:
        # wrong number of positional arguments
        usage(1)

    print "Reading the training data"
    train_data = read_dict(dict_file)
    print train_data

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words, target_words = zip(*train_data)

    print "Reading: %s" % source_file
    source_sp = Space.build(source_file, set(source_words))
    source_sp.normalize()

    print "Reading: %s" % target_file
    target_sp = Space.build(target_file, set(target_words))
    target_sp.normalize()

    print "Learning the translation matrix"
    tm = train_tm(source_sp, target_sp, train_data)

    print "Printing the translation matrix"
    np.savetxt("%s.txt" % out_file, tm)


if __name__ == '__main__':
    main(sys.argv)
        target_file = argv[2]
        dict_file = argv[0]
    else:
        # wrong number of positional arguments
        usage(1)

    print "Reading the training data"
    train_data = read_dict(dict_file)

    # we only need to load the vectors for the words in the training data;
    # semantic spaces contain additional words
    source_words, target_words = zip(*train_data)

    print "Reading: %s" % source_file
    source_sp = Space.build(source_file, set(source_words))
    source_sp.normalize()

    print "Reading: %s" % target_file
    target_sp = Space.build(target_file, set(target_words))
    target_sp.normalize()

    print "Learning the translation matrix"
    print "Training data: %s" % str(train_data)
    tm = train_tm(source_sp, target_sp, train_data)

    print "Printing the translation matrix"
    np.savetxt("%s.txt" % out_file, tm)


if __name__ == '__main__':
    main(sys.argv)
def main(sys_argv):
    try:
        opts, argv = getopt.getopt(sys_argv[1:], "ho:c:l:m:1:2:t:a:v:", [
            "help", "output=", "correction=", "levenshtein=", "matrix=",
            "1=", "2=", "topK=", "alpha=", "verbosity="
        ])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(1)

    out_file = "./translated_vecs"
    additional = None
    levcosts = {}
    for opt, val in opts:
        # print(opt+'='+val)
        if opt in ("-o", "--output"):
            out_file = val
        elif opt in ("-l", "--levenshtein"):
            levcosts = u.readcosts(val)
        elif opt in ("-m", "--matrix"):
            tm_file = val
        elif opt == '-1':
            source_file = val
        elif opt == '-2':
            target_file = val
        elif opt in ("-c", "--correction"):
            try:
                additional = int(val)
            except ValueError:
                print("additional: %s" % val)
                usage(1)
        elif opt in ("-t", "--topK"):
            try:
                u.topK = int(val)
            except ValueError:
                print("topK: %s" % val)
                usage(1)
        elif opt in ("-v", "--verbosity"):
            try:
                u.verbosity = int(val)
            except ValueError:
                print("verbosity: %s" % val)
                usage(1)
        elif opt in ("-a", "--alpha"):
            try:
                u.alpha = float(val)
            except ValueError:
                print("alpha: %s" % val)
                usage(1)
        elif opt in ("-h", "--help"):
            usage(0)
        else:
            print("Unknown option: -%s %s" % (opt, val))
            usage(1)

    if len(argv) == 1:
        test_file = argv[0]
    else:
        print('Unused arguments:')
        print(argv)
        usage(1)

    # if u.verbosity > 0:
    # always log the parameters in the output
    sys.stdout.write(sys_argv[0] + " ")
    for opt, val in opts:
        sys.stdout.write(opt + " " + val + " ")
    print(test_file)

    if u.verbosity > 1:
        print("Loading the translation matrix %s " % tm_file)
    tm = np.loadtxt(tm_file)

    if u.verbosity > 1:
        print("Reading the test data %s " % test_file)
    test_data = u.read_dict(test_file)

    # in the _source_ space, we only need to load vectors for the words in test.
    # semantic spaces may contain additional words, ALL words in the _target_
    # space are used as the search space
    source_words, _ = zip(*test_data)
    source_words = set(source_words)

    if u.verbosity > 1:
        print("Reading: %s" % source_file)
    if not additional:
        source_sp = Space.build(source_file, source_words)
    else:
        # read all the words in the space
        with io.open(source_file, 'r', encoding='utf8') as f:
            lexicon = set([l.split(' ')[0] for l in f])
        # lexicon = set(np.loadtxt(source_file, skiprows=1, dtype=str,
        #                          comments=None, usecols=(0,)).flatten())
        # the max number of additional+test elements is bounded by the size
        # of the lexicon
        additional = min(additional, len(lexicon) - len(source_words))
        # we sample additional elements that are not already in source_words
        random.seed(100)
        if additional > 0:
            lexicon = random.sample(list(lexicon.difference(source_words)),
                                    additional)
        # load the source space
        source_sp = Space.build(source_file,
                                source_words.union(set(lexicon)))
    source_sp.normalize()

    if u.verbosity > 1:
        print("Reading: %s" % target_file)
    target_sp = Space.build(target_file)
    target_sp.normalize()

    if u.verbosity > 1:
        print("Retrieving translations")
    test_data = u.get_valid_data(source_sp, target_sp, test_data)

    # turn test data into a dictionary (a word can have multiple translations)
    gold = collections.defaultdict(set)
    for k, v in test_data:
        gold[k].add(v)

    if u.verbosity > 1:
        print("Translating")
    # translates all the elements loaded in the source space
    source_sp = u.apply_tm(source_sp, tm)

    u.score(source_sp, target_sp, gold, additional, levcosts)

    print("Printing mapped vectors: %s" % out_file)
    np.savetxt("%s.vecs.txt" % out_file, source_sp.mat)
    # np.savetxt("%s.wds.txt" % out_file, source_sp.id2row, fmt="%s")  # no utf8
    with open("%s.wds.txt" % out_file, "w") as outf:
        for s in source_sp.id2row:
            print(s, file=outf)
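# --- Hedged sketch (not part of the original code) ---------------------------
# u.readcosts loads the Levenshtein cost dictionary used for the -l option. A
# minimal sketch, assuming one "char1 char2 cost" triple per line and a default
# cost of 1 for unlisted pairs at lookup time; the file format is an assumption.
import io

def readcosts_sketch(path):
    costs = {}
    with io.open(path, 'r', encoding='utf8') as f:
        for line in f:
            fields = line.split()
            if len(fields) >= 3:
                costs[(fields[0], fields[1])] = float(fields[2])
    return costs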
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Evaluate embeddings of two languages in a shared space '
                    'in word translation induction')
    parser.add_argument('-1', '--embeddings1', help='the source embeddings')
    parser.add_argument('-2', '--embeddings2', help='the target embeddings')
    parser.add_argument('-d', '--dictionary', default=sys.stdin.fileno(),
                        help='the test dictionary file (defaults to stdin)')
    parser.add_argument('--encoding', default='utf-8',
                        help='the character encoding for input/output (defaults to utf-8)')
    parser.add_argument('-l', '--levenshtein', help='the Levenshtein cost dictionary')
    parser.add_argument('-r', '--runname', default='RUN', help='the name for the current run')
    parser.add_argument('-a', '--alpha', default='1',
                        help='the contribution of cos vs Levenshtein')
    parser.add_argument('-k', '--topK', default=10, type=int,
                        help='the number of top candidates to output')
    parser.add_argument('-n', '--neighbors', default=150, type=int,
                        help='the number of neighbors for WLD')
    parser.add_argument('-t', '--threshold', type=int, default=0,
                        help='reduce vocabulary of the model for fast approximate evaluation '
                             '(0 = off, otherwise typical value is 30,000)')
    parser.add_argument('-v', '--verbosity', type=int, default=1, help='the verbosity level')
    args = parser.parse_args()
    verbosity = args.verbosity
    alpha = float(args.alpha)
    levcosts = ut.readcosts(args.levenshtein)

    # Read input embeddings
    source_sp = Space.build(args.embeddings1, threshold=args.threshold)  # ,set(wlist)
    source_sp.normalize()
    if verbosity > 2:
        print('Read %d source embeddings from %s'
              % (len(source_sp.row2id), args.embeddings1), file=sys.stderr)
    test_sp = Space.build(args.embeddings2, threshold=args.threshold)  # ,set(wlist_test)
    test_sp.normalize()
    if verbosity > 2:
        print('Read %d target embeddings from %s'
              % (len(test_sp.row2id), args.embeddings2), file=sys.stderr)

    # Read dictionary and compute coverage
    oov = set()
    vocab = set()
    f = open(args.dictionary, encoding=args.encoding, errors='surrogateescape')
    src2trg = collections.defaultdict(set)
    for line in f:
        src, trg = line.rstrip().split('\t')
        try:
            src_ind = source_sp.row2id[src]
            trg_ind = test_sp.row2id[trg]
            src2trg[src_ind].add(trg_ind)
            vocab.add(src)
        except KeyError:
            oov.add(src)
            if verbosity > 1:
                print('Out of dict: (%s) vs (%s)' % (src, trg), file=sys.stderr)
    # If one of the translation options is in the vocabulary, then the entry is not an OOV
    oov -= vocab
    coverage = len(src2trg) / (len(src2trg) + len(oov))
    if verbosity > 1:
        print('Vocab size: %d; Number of pairs: %d' % (len(vocab), len(src2trg)),
              file=sys.stderr)

    # Compute accuracy
    correctcount = 0
    src, trg = zip(*src2trg.items())
    test_T = test_sp.mat.T
    for i in range(0, len(src2trg), BATCH_SIZE):
        j = min(i + BATCH_SIZE, len(src2trg))
        similarities = source_sp.mat[list(src[i:j])].dot(test_T)
        # nn = np.argmax(similarities, axis=1).tolist()
        nnl = np.argsort(-similarities, axis=1)  # scores are negated for reverse sort
        for k in range(j - i):
            id_fs = nnl[k, 0:args.neighbors].tolist()
            # nn[k]=curlist[0]
            id_fs = id_fs[0]
            scores_f = similarities[k, id_fs]
            if verbosity > 4:
                print('Next word %s' % src[i + k], file=sys.stderr)
            w_e = source_sp.id2row[src[i + k]]
            for pos, id_f in enumerate(id_fs):
                w_f = test_sp.id2row[id_f]
                if alpha < 1:
                    scores_f[0, pos] = (alpha * scores_f[0, pos]
                                        + (1 - alpha) * (1 - ut.iterative_levenshtein(w_e, w_f, levcosts)))
            topKlist = np.argsort(-scores_f).tolist()[0][0:args.topK]
            bestid_f = id_fs[topKlist[0]]
            covered = set()
            if bestid_f in trg[i + k]:
                correctcount += 1
            for l in topKlist:
                id_f = id_fs[l]
                if id_f in covered:  # for occasional double vectors
                    continue
                covered.add(id_f)
                if id_f in trg[i + k]:
                    correct = 'Y'
                else:
                    correct = 'N'
                if verbosity > 0:
                    goldids = [test_sp.id2row[goldid] for goldid in trg[i + k]]
                    print("%s\t%s\t%s\tR%d\t%.3f\t%s\t{%s}"
                          % (source_sp.id2row[src[i + k]], correct, test_sp.id2row[id_f],
                             l, scores_f[0, l], args.runname, ','.join(goldids)))

    print('Coverage:{0:7.2%} Accuracy total:{1:7.2%} Accuracy vocab:{2:7.2%}'.format(
        coverage, correctcount / len(src2trg), correctcount / len(vocab)),
        file=sys.stderr)
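# --- Hedged sketch (not part of the original code) ---------------------------
# ut.iterative_levenshtein is combined with cosine above via
# alpha*cos + (1-alpha)*(1-WLD), which suggests it returns a distance in
# [0, 1]. A minimal sketch of a weighted, length-normalized edit distance,
# assuming `costs` maps (char1, char2) pairs to substitution costs with a
# default of 1; the normalization and cost format are assumptions.
def iterative_levenshtein_sketch(s, t, costs=None):
    costs = costs or {}
    rows, cols = len(s) + 1, len(t) + 1
    dist = [[0.0] * cols for _ in range(rows)]
    for i in range(1, rows):
        dist[i][0] = float(i)
    for j in range(1, cols):
        dist[0][j] = float(j)
    for i in range(1, rows):
        for j in range(1, cols):
            sub = 0.0 if s[i - 1] == t[j - 1] else costs.get((s[i - 1], t[j - 1]), 1.0)
            dist[i][j] = min(dist[i - 1][j] + 1,        # deletion
                             dist[i][j - 1] + 1,        # insertion
                             dist[i - 1][j - 1] + sub)  # substitution
    return dist[-1][-1] / max(len(s), len(t), 1)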
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Project the source embeddings into the target embedding space '
                    'maximizing the squared Euclidean distances for the given dictionary')
    parser.add_argument('-1', '--embeddings1', help='the source embeddings')
    parser.add_argument('-2', '--embeddings2', help='the target embeddings')
    parser.add_argument('-t', '--train', help='the training file')
    parser.add_argument('-d', '--dev', help='the development file')
    parser.add_argument('-k', '--kfolds', type=int, default=5, help='K for cross-validation')
    parser.add_argument('-m', '--multilabel', type=int, default=0, help='use multilabel')
    parser.add_argument('-p', '--params', type=int, default=0, help='for HP tuning')
    parser.add_argument('-c', '--classifier', help='the classifier: LR, MLP, SVC, kNN or RF')
    parser.add_argument('-s', '--solver', default='adam',
                        help='the appropriate solver: sgd, adam, sag, liblinear, etc.')
    parser.add_argument('-f', '--fallback', type=bool, default=False, help='use kNN as fallback')
    parser.add_argument('--encoding', default='utf-8',
                        help='the character encoding for input/output (defaults to utf-8)')
    # parser.add_argument('-l', '--levenshtein', help='the Levenshtein cost dictionary')
    # parser.add_argument('-a', '--alpha', default='0.7', help='the contribution of cos vs Levenshtein')
    parser.add_argument('-v', '--verbosity', type=int, default=1, help='the verbosity level')
    args = parser.parse_args()
    verbosity = args.verbosity
    ut.verbosity = args.verbosity

    trainfile = ut.myopen(args.train, encoding=args.encoding, errors='surrogateescape')
    wlist, annot = ut.readtrain(args.train, {}, args.multilabel)
    if verbosity > 2:
        print('Read %d train examples from %s' % (len(annot), args.train))
    if args.dev:
        testfile = ut.myopen(args.dev, encoding=args.encoding, errors='surrogateescape')
        wlist_test, annot_test = ut.readtrain(args.dev, {}, args.multilabel)
        if verbosity > 2:
            print('Read %d test examples from %s' % (len(annot_test), args.dev))

    source_sp = Space.build(args.embeddings1)  # ,set(wlist)
    if verbosity > 2:
        print('Read %d source embeddings from %s' % (len(source_sp.row2id), args.embeddings1))
    if args.dev:
        if args.embeddings2:
            test_sp = Space.build(args.embeddings2)  # ,set(wlist_test)
            if verbosity > 2:
                print('Read %d target embeddings from %s' % (len(test_sp.row2id), args.embeddings2))
        else:
            test_sp = source_sp

    # Build word to index maps
    # src_desc2ind, src_word2ind = ut.makemaps(src_descs)
    vocab, y = ut.make_y(source_sp.id2row, wlist, annot)
    if verbosity > 2:
        print('Total training lexicon of %d examples' % len(y))
    if args.dev:
        vocab_test, y_test = ut.make_y(test_sp.id2row, wlist_test, annot_test)
        if verbosity > 2:
            print('Total testing lexicon of %d examples' % len(y_test))

    # # Read dictionary
    # f = ut.myopen(args.dictionary, encoding=args.encoding, errors='surrogateescape')
    # src_indices = []
    # trg_indices = []
    # for line in f:
    #     elts = line.split()  # in case there are extra fields
    #     src, trg = elts[0:2]
    #     try:
    #         src_ind = src_word2ind[src]
    #         trg_ind = trg_word2ind[trg]
    #         for i in src_ind:
    #             for j in trg_ind:
    #                 src_indices.append(i)
    #                 trg_indices.append(j)
    #     except KeyError:
    #         if verbosity > 1:
    #             print('WARNING: OOV dictionary entry ({0} - {1})'.format(src, trg), file=sys.stderr)
    # if verbosity > 2:
    #     print('Read %d translations from %s' % (len(src_indices), args.dictionary), file=sys.stderr)

    # X_train, X_test, y_train, y_test = train_test_split(vocab, y, test_size=args.subset)
    # if verbosity > 2:
    #     print('Train lexicon: %d, test lexicon: %d' % (len(y_train), len(y_test)))
    # X_train_mat = source_sp.mat[[source_sp.row2id[el] for el in X_train], :]
    X_train_mat = source_sp.mat[[source_sp.row2id[el] for el in vocab], :]
    if args.dev:
        X_test_mat = test_sp.mat[[test_sp.row2id[el] for el in vocab_test], :]

    if args.multilabel:
        mlb = MultiLabelBinarizer()
        if args.dev:
            yfull = y + y_test
            yfull = mlb.fit_transform(yfull)
            y = yfull[0:len(y)]
            y_test = yfull[len(y):]
        else:
            y = mlb.fit_transform(y)

    if args.classifier == 'LR':
        clf = LogisticRegression(solver=args.solver, max_iter=100, random_state=42,
                                 multi_class='multinomial')
    elif args.classifier == 'MLP':
        if args.params > 0:
            p = ParameterGrid({'hl': [(50,), (75,), (75, 75, 75), (75, 50, 25)],
                               'mi': [50, 100, 200],
                               'alpha': [1e-3, 1e-4, 1e-5],
                               'lri': [.1, 1e-2, 1e-3],
                               'act': ['relu', 'logistic', 'tanh']})[args.params - 1]
        else:
            p = {'mi': 50, 'lri': 0.1, 'hl': (150,), 'alpha': 0.001, 'act': 'tanh'}  # 'hl':(200,) is better
        print(p)
        clf = MLPClassifier(hidden_layer_sizes=p['hl'], max_iter=p['mi'], alpha=p['alpha'],
                            solver=args.solver, verbose=0, tol=1e-4, random_state=1,
                            learning_rate_init=p['lri'], activation=p['act'],
                            early_stopping=True)
    elif args.classifier == 'SVC':
        clf = SVC()
    elif args.classifier == 'kNN':
        clf = KNeighborsClassifier(n_neighbors=3, weights='uniform', algorithm='auto')
    elif args.classifier == 'RF':
        clf = RandomForestClassifier()
    if verbosity > 2:
        print('Classifier: %s' % args.classifier)

    if args.fallback:
        clf_fallback = KNeighborsClassifier(n_neighbors=3, weights='distance', algorithm='auto')

    # clf.fit(X_train_mat, y_train)
    # y_pred = clf.predict(X_test_mat)
    # print(confusion_matrix(y_test, y_pred))
    # print(classification_report(y_test, y_pred))
    # print(accuracy_score(y_test, y_pred))
    # clf.predict_proba(X_test_mat)

    if args.dev:
        clf.fit(X_train_mat, y)
        predictions = clf.predict(X_test_mat)
        if args.multilabel:
            if args.fallback:
                clf_fallback.fit(X_train_mat, y)
                pred_fallback = mlb.inverse_transform(clf_fallback.predict(X_test_mat))
            print("Label ranking average precision: %0.3f"
                  % metrics.label_ranking_average_precision_score(y_test, predictions))
            print("F1 score: %0.3f" % metrics.f1_score(y_test, predictions, average='samples'))
            i = 0
            if verbosity > 3:
                for gold, pred in zip(mlb.inverse_transform(y_test), mlb.inverse_transform(predictions)):
                    if args.fallback and len(pred) == 0:  # occasionally no output is given
                        pred = pred_fallback[i]
                    print("%0.1f\t%s\t%s\t%s" % (len(set(gold).intersection(set(pred))) / len(gold),
                                                 vocab_test[i], gold, pred))
                    i += 1
        else:
            print("F1 Macro: %0.2f" % metrics.f1_score(y_test, predictions, average='macro'))
    else:
        predictions = cross_val_predict(clf, X_train_mat, y, n_jobs=-2, cv=args.kfolds)
        scores = cross_val_score(clf, X_train_mat, y, n_jobs=-2, cv=args.kfolds, scoring='f1_macro')
        print("F1 Macro: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        if args.multilabel:
            if args.fallback:
                pred_fallback = mlb.inverse_transform(
                    cross_val_predict(clf_fallback, X_train_mat, y, n_jobs=-2, cv=args.kfolds))
            i = 0
            if verbosity > 3:
                for gold, pred in zip(mlb.inverse_transform(y), mlb.inverse_transform(predictions)):
                    if args.fallback and len(pred) == 0:  # occasionally no output is given
                        pred = pred_fallback[i]
                        print("%1d\tFor %s (%s) fallback: %s" % (gold == pred, vocab[i], gold, pred),
                              file=sys.stderr)
                    print("%0.1f\t%s\t%s\t%s" % (len(set(gold).intersection(set(pred))) / len(gold),
                                                 vocab[i], gold, pred))
                    i += 1
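# --- Hedged sketch (not part of the original code) ---------------------------
# ut.make_y builds the training vocabulary and label list used above. A minimal
# sketch, assuming it keeps only the annotated words that actually have a
# vector in the space and returns them with their labels in matching order;
# the exact behaviour is an assumption.
def make_y_sketch(id2row, wlist, annot):
    in_space = set(id2row)
    vocab, y = [], []
    for word, label in zip(wlist, annot):
        if word in in_space:
            vocab.append(word)
            y.append(label)
    return vocab, y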
def main(sys_argv):
    try:
        opts, argv = getopt.getopt(sys_argv[1:], "ho:c:",
                                   ["help", "output=", "correction="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(1)

    out_file = "./translated_vecs"
    additional = None
    for opt, val in opts:
        if opt in ("-o", "--output"):
            out_file = val
        elif opt in ("-c", "--correction"):
            try:
                additional = int(val)
            except ValueError:
                usage(1)
        elif opt in ("-h", "--help"):
            usage(0)
        else:
            usage(1)

    if len(argv) == 4:
        tm_file = argv[0]
        test_file = argv[1]
        source_file = argv[2]
        target_file = argv[3]
    else:
        # wrong number of positional arguments
        usage(1)

    print("Loading the translation matrix")
    tm = np.loadtxt(tm_file)

    print("Reading the test data")
    test_data = read_dict(test_file)

    # in the _source_ space, we only need to load vectors for the words in test.
    # semantic spaces may contain additional words, ALL words in the _target_
    # space are used as the search space
    source_words, _ = zip(*test_data)
    source_words = set(source_words)

    print("Reading: %s" % source_file)
    if not additional:
        source_sp = Space.build(source_file, source_words)
    else:
        # read all the words in the space
        lexicon = set(np.loadtxt(source_file, skiprows=1, dtype=str,
                                 comments=None, usecols=(0,)).flatten())
        # the max number of additional+test elements is bounded by the size
        # of the lexicon
        additional = min(additional, len(lexicon) - len(source_words))
        # we sample additional elements that are not already in source_words
        random.seed(100)
        lexicon = random.sample(list(lexicon.difference(source_words)),
                                additional)
        # load the source space
        source_sp = Space.build(source_file,
                                source_words.union(set(lexicon)))
    source_sp.normalize()

    print("Reading: %s" % target_file)
    target_sp = Space.build(target_file)
    target_sp.normalize()

    print("Translating")
    # translates all the elements loaded in the source space
    mapped_source_sp = apply_tm(source_sp, tm)

    print("Retrieving translations")
    test_data = get_valid_data(source_sp, target_sp, test_data)

    # turn test data into a dictionary (a word can have multiple translations)
    gold = collections.defaultdict(set)
    for k, v in test_data:
        gold[k].add(v)

    score(mapped_source_sp, target_sp, gold, additional)

    print("Printing mapped vectors: %s" % out_file)
    np.savetxt("%s.vecs.txt" % out_file, mapped_source_sp.mat)
    np.savetxt("%s.wds.txt" % out_file, mapped_source_sp.id2row, fmt="%s")
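# --- Hedged sketch (not part of the original code) ---------------------------
# score evaluates the mapped source vectors against the full target space. A
# minimal sketch of a precision@1 evaluation by nearest neighbour under cosine
# (both spaces are normalized above, so the dot product is the cosine); the
# original reporting format and the role of `additional` are assumptions, so
# `additional` is ignored here.
import numpy as np

def score_sketch(mapped_source_sp, target_sp, gold, additional=None):
    correct, total = 0, 0
    for src_word, translations in gold.items():
        if src_word not in mapped_source_sp.row2id:
            continue
        vec = mapped_source_sp.mat[mapped_source_sp.row2id[src_word]]
        sims = target_sp.mat.dot(vec)            # cosine similarities to all target words
        best = target_sp.id2row[int(np.argmax(sims))]
        correct += int(best in translations)
        total += 1
    if total:
        print("Prec@1: %.4f (%d/%d)" % (float(correct) / total, correct, total))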