Example #1
def subsample(self, train):
    # Method of a sampler object: keep each training item with probability
    # self.p, caching the selected names in self.s_name so repeated calls
    # reuse the same random subset.
    all_names = list(train.keys())
    if self.s_name is None:
        self.s_name = [
            name_i for name_i in all_names
            if np.random.uniform() < self.p
        ]
    s_train = {name_i: train[name_i] for name_i in self.s_name}
    return feats.Feats(s_train)
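A standalone sketch of the Bernoulli-style selection this method relies on, using a plain dict and illustrative names in place of the project's Feats container:

import numpy as np

np.random.seed(0)
train = {"0_%d" % i: np.ones(3) * i for i in range(10)}
p = 0.5
# Each key survives independently with probability p; the surviving
# key list plays the role of self.s_name above.
s_name = [name for name in train if np.random.uniform() < p]
s_train = {name: train[name] for name in s_name}
print(len(train), "->", len(s_train), "samples kept")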
Example #2
def cross_acc(datasets):
    # Keep only the training half of each dataset.
    datasets = [data_i.split()[0] for data_i in datasets]
    acc = []
    for data_i in datasets:
        # Renumber samples as "<category>_<index>" before training.
        new_data_i = feats.Feats()
        for j, name_j in enumerate(data_i.keys()):
            new_name = "%s_%d" % (name_j.split('_')[0], j)
            new_data_i[feats.Name(new_name)] = data_i[name_j]
        result_i = train.train_model(new_data_i, binary=True)
        acc.append(result_i.get_acc())
    return np.array(acc)
Example #3
def to_feats(in_path, out_path):
    # Build a feature vector for each sequence from its pairwise DTW
    # distances to every training sequence.
    pairs = read(in_path)
    dtw_feats = feats.Feats()
    train, test = feats.split(pairs, names_only=True)
    for name_i in pairs.keys():
        dtw_feats[name_i] = np.array(
            [pairs[name_i][name_j] for name_j in train])
    dtw_feats.save(out_path)
Example #4
def __call__(self, datasets, clf="LR"):
    # Subsample the training half of each dataset, recombine it with the
    # untouched test half, and train a classifier on the result.
    results = []
    new_datasets = []
    for data_i in datasets:
        train_i, test_i = data_i.split()
        s_train_i = self.subsample(train_i)
        s_data_i = feats.Feats({**s_train_i, **test_i})
        new_datasets.append(s_data_i)
        result_i = learn.train_model(s_data_i, binary=False, clf_type=clf)
        results.append(result_i)
    return new_datasets, results
Example #5
def reduce(data_i, n=100):
    # Recursive feature elimination down to n dimensions; skipped when n
    # is falsy or the data already has fewer features than n.
    if not n or n > data_i.dim()[0]:
        return data_i
    print("Old dim: " + str(data_i.dim()))
    X, y, names = data_i.as_dataset()
    train_i = data_i.split()[0]
    new_X = recursive(train_i, data_i, n)
    new_data_i = feats.Feats()
    for j, name_j in enumerate(names):
        new_data_i[name_j] = new_X[j]
    print("New dim: " + str(new_data_i.dim()))
    return new_data_i
Example #6
def random_split(train):
    # Stratified 50/50 resplit; each sample is renamed "<cat>_<0|1>_<idx>"
    # so the new key records which half it landed in.
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
    X, y, names = train.as_dataset()
    new_dict = feats.Feats()
    for train_index, test_index in sss.split(X, y):
        for i in test_index:
            name_i = "%d_0_%d" % (names[i].get_cat(), i)
            new_dict[feats.Name(name_i)] = train[names[i]]
        for i in train_index:
            name_i = "%d_1_%d" % (names[i].get_cat(), i)
            new_dict[feats.Name(name_i)] = train[names[i]]
    return new_dict
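For readers unfamiliar with the sklearn call above, a minimal standalone demo of StratifiedShuffleSplit on plain numpy arrays (the data here is made up for illustration):

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

X = np.arange(20).reshape(10, 2)      # 10 samples, 2 features
y = np.array([0] * 5 + [1] * 5)       # two balanced classes
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
for train_index, test_index in sss.split(X, y):
    # Each half keeps (approximately) the class balance of y.
    print(sorted(y[train_index]), sorted(y[test_index]))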
Example #7
def one_shot(main, add, out_path):
    # Concatenate DTW feature vectors from the main (and optional extra)
    # pairwise-distance files, for the names chosen by the person_id /
    # center_selector criteria defined elsewhere in the module.
    dtw_pairs = read(main)
    names = dtw_pairs.selection(person_id, center_selector)
    full = dtw_pairs.with_test(names)
    pairs = [dtw_pairs, read(add)] if add else [dtw_pairs]
    s_feats = feats.Feats()
    for name_i in full:
        vectors = [pairs_i.get_vector(name_i, names)
                   for pairs_i in pairs]
        s_feats[name_i] = np.concatenate(vectors, axis=0)
    s_feats.save(out_path)
Example #8
def helper(data_i):
    # Nested helper: copy data_i into a fresh Feats container, renaming
    # keys according to the enclosing scope's rename mapping.
    feat_i = feats.Feats()
    for name_i, rename_i in rename.items():
        print((rename_i, name_i))
        feat_i[rename_i] = data_i[name_i]
    return feat_i
Example #9
def compute_stats(in_path, out_path):
    # Compute an EBTF feature vector for every sequence and save the result.
    seqs = files.get_seqs(in_path)
    feat_dict = feats.Feats()
    for name_i, seq_i in seqs.items():
        feat_dict[name_i] = EBTF(seq_i)
    feat_dict.save(out_path)
Example #10
def to_feats(train, full, pairs, feat_path):
    # Feature vector for each name: its pairwise features against the
    # training set.
    dtw_feats = feats.Feats()
    for name_i in full:
        dtw_feats[name_i] = pairs.features(name_i, train)
    dtw_feats.save(feat_path)
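All ten examples assume the project's feats.Feats container. The real class is not shown on this page; the following is only a hypothetical sketch of the dict-like core these snippets rely on, with plain string keys standing in for the project's Name objects:

import numpy as np

class Feats(dict):
    # Hypothetical stand-in: maps sample names to 1-D feature vectors.
    def dim(self):
        return next(iter(self.values())).shape

    def as_dataset(self):
        # Labels parsed from the "<cat>_..." naming convention used above.
        names = sorted(self.keys())
        X = np.array([self[name] for name in names])
        y = np.array([int(name.split("_")[0]) for name in names])
        return X, y, names

    def save(self, out_path):
        # One whitespace-separated row "name v1 v2 ..." per sample.
        with open(out_path, "w") as f:
            for name, vec in self.items():
                f.write("%s %s\n" % (name, " ".join(str(v) for v in vec)))

feat_dict = Feats({"0_1": np.zeros(3), "1_2": np.ones(3)})
print(feat_dict.dim(), feat_dict.as_dataset()[1])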