Example #1
def run(args, env, agent, ram, env_params):
    if args.mode == 'train' and (not args.HER):
        training(args, env, agent, ram, env_params)
    elif args.mode == 'train' and args.HER:
        her_training(args, env, agent, ram, env_params)
    else:
        evaluating(args, env, agent, args.max_episodes - 1)
Example #2
def main(args):
    start_time = time.time()

    if args.training:
        training(args)

    if args.testing:
        testing(args)

    end_time = round((time.time() - start_time) / 60, 4)
    print(f'Done!; {end_time}min spent')
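Example #2 reads boolean args.training / args.testing switches but does not show where they come from. Below is a minimal sketch of the assumed argparse wiring; the flag names and the stub training/testing helpers are illustrative guesses, not code from the original project.

import argparse
import time


def training(args):
    # stand-in for the real training routine
    print('training stub, args =', vars(args))


def testing(args):
    # stand-in for the real evaluation routine
    print('testing stub, args =', vars(args))


def main(args):
    start_time = time.time()
    if args.training:
        training(args)
    if args.testing:
        testing(args)
    print(f'Done!; {round((time.time() - start_time) / 60, 4)}min spent')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--training', action='store_true')  # run the training phase
    parser.add_argument('--testing', action='store_true')   # run the testing phase
    main(parser.parse_args())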
Example #3
def main(args):
    # Time setting
    total_start_time = time.time()

    if args.preprocessing:
        preprocessing(args)

    if args.training:
        training(args)

    # Time calculate
    print(f'Done! ; {round((time.time()-total_start_time)/60, 3)}min spent')
Example #4
def train(params, id):
    """Run the training once and get the final loss."""
    # make the folders
    save_model_path = os.path.join(root_save_dir, str(id))
    save_result_path = os.path.join(save_model_path,"test_results")
    save_log_path = os.path.join(root_log_dir, "{}.txt".format(id))
    if (not os.path.exists(save_model_path)): os.makedirs(save_model_path)
    if (not os.path.exists(save_result_path)): os.makedirs(save_result_path)

    # perform training
    train_results = training(params=params, logger=LoggerGenerator.get_logger(
        log_file_path=save_log_path), save_model_to=save_model_path,train_func=do_forward_pass)
    # plot loss vs. iterations
    lines = [str(l) for l in train_results["code_loss_records"]]
    plot_trend_graph(var_names=["code loss"], var_indexes=[-1], var_types=["float"], var_colors=["r"], lines=lines,
                     title="code loss",save_to=os.path.join(save_result_path,"train-code_loss.png"),show_fig=False)
    lines = [str(l) for l in train_results["discr_loss_records"]]
    plot_trend_graph(var_names=["discriminator loss"], var_indexes=[-1], var_types=["float"], var_colors=["r"], lines=lines,
                     title="discriminator loss", save_to=os.path.join(save_result_path, "train-discriminator_loss.png"), show_fig=False)
    lines = [str(l) for l in train_results["discr_acc_records"]]
    plot_trend_graph(var_names=["discriminator acc"], var_indexes=[-1], var_types=["float"], var_colors=["r"], lines=lines,
                     title="discriminator acc", save_to=os.path.join(save_result_path, "train-discriminator_acc.png"), show_fig=False)

    with open(os.path.join(save_result_path,"train_records.txt"),"w") as f:
        f.write(str(train_results))
    print("finish training for parameter set #{}".format(id))

    # perform testing
    results = run_simple_test(params=params, saved_model_path=save_model_path,model_def=test_model)
    # save test results
    results["records"]["precision-recall-curve.jpg"].save(os.path.join(save_result_path,"precision-recall.png"))
    with open(os.path.join(save_result_path,"metrics.txt"),"w") as f:
        f.write(str(results["results"]))

    print("finish testing for parameter set #{}".format(id))
Example #5
def main(args):
    # Time setting
    total_start_time = time.time()

    # preprocessing
    if args.preprocessing:
        preprocessing(args)

    # Augmentation by NER_Masking
    if args.augmenting:
        augmenting(args)

    # training
    if args.training:
        training(args)

    # Time calculate
    print(f'Done! ; {round((time.time()-total_start_time)/60, 3)}min spent')
Example #6
def main():
    dataTrain, labelsTrain = train.train_imgToArray()
    cowsTrain, labelsTrain = train.train_convertToNumpy(dataTrain, labelsTrain)
    x_train, y_train = train.train_createData(cowsTrain, labelsTrain)
    model = train.kerasModel()
    model = train.training(model, x_train, y_train)

    dataTest, labelsTest = test.test_imgToArray()
    cowsTest, labelsTest = test.test_convertToNumpy(dataTest, labelsTest)
    x_test, y_test = test.test_createData(cowsTest, labelsTest)
    test.testing(model, x_test, y_test)
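Example #6 depends on a train.kerasModel() factory that is not shown. A rough, hypothetical sketch of such a builder follows; the input shape, layer sizes, and class count are assumptions rather than the original architecture.

import tensorflow as tf


def kerasModel(input_shape=(128, 128, 3), num_classes=2):
    # hypothetical stand-in for train.kerasModel(): a small CNN classifier
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=input_shape),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Conv2D(64, 3, activation='relu'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model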
Example #7
def union(dataset_dict, args, dump_path):
    print('start data load domain-union')
    union_train_x = []
    union_test_x = []
    union_train_ga = []
    union_test_ga = []
    union_train_o = []
    union_test_o = []
    union_train_ni = []
    union_test_ni = []
    for domain in domain_dict:
        size = math.ceil(
            len(dataset_dict['{0}_x'.format(domain)]) * args.train_test_ratio)
        union_train_x += dataset_dict['{0}_x'.format(domain)][:size]
        union_test_x += dataset_dict['{0}_x'.format(domain)][size:]
        union_train_ga += dataset_dict['{0}_y_ga'.format(domain)][:size]
        union_test_ga += dataset_dict['{0}_y_ga'.format(domain)][size:]
    train_data = tuple_dataset.TupleDataset(union_train_x, union_train_ga)
    test_data = tuple_dataset.TupleDataset(union_test_x, union_test_ga)
    training(train_data, test_data, 'union', 'ga', dump_path, args)
Example #8
def main(args):

    print("+++ main")

    # Prepare experiment
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Create dataset
    dataset = Dataset(args.batch_size, args.nopad, device)
    train_iterator, valid_iterator, test_iterator = dataset.create_iterator()

    args.input_size = len(dataset.source_field.vocab)
    args.output_size = len(dataset.target_field.vocab)
    print(f"Source vocab size = {len(dataset.source_field.vocab)}")
    print(f"Target vocab size = {len(dataset.target_field.vocab)}")
    args.sos_idx = dataset.target_field.vocab.stoi[args.sos]
    args.eos_idx = dataset.target_field.vocab.stoi[args.eos]
    args.pad_idx = dataset.target_field.vocab.stoi[args.pad]

    # Create model
    model = create_model(args.model_type, args, device).to(device)
    init_function = create_init_function(args.init_type)
    model.apply(init_function)
    total_params = model.count_parameters()
    print(f"Total number of parameters = {total_params}")

    # Prepare training
    optimizer = create_optimizer(args.optim_type, args.learning_rate,
                                 model.parameters(), args)
    criterion = nn.CrossEntropyLoss(ignore_index=args.pad_idx)

    # Training
    training(model, train_iterator, valid_iterator, optimizer, args.optim_type,
             criterion, args.num_epochs, args.clip, args.nopad, device)

    print("--- main")
Example #9
def upload():
    if request.method == 'POST' and 'photo' in request.files:

        filename = photos.save(request.files['photo'])
        print("file has been saved")
        pickle_in = open("uploads/training_data.pickle", "rb")
        data = pickle.load(pickle_in)
        print("Daya take")
        i = 0
        for image, imgClass in data:

            if not os.path.exists("dataset/" + imgClass):

                os.makedirs("dataset/" + imgClass)
                ap(imgClass)

            path = "dataset/" + str(imgClass) + "/" + str(i) + ".jpg"
            print(path)
            cv2.imwrite(path, image)
            i = i + 1
        training()
        cv2.destroyAllWindows()

    return render_template('upload.html')
Example #10
def main():

    trainGenerator, valGenerator, testGenerator = create_generators()

    if args.phase == 'train':
        model, history = training(trainGenerator, valGenerator)

    elif args.phase == 'test':

        file_path = "./checkpoint"
        model = tf.keras.models.load_model(file_path)

        testing(testGenerator, model)

    else:
        print("/!\ Unknown phase : type 'train' or 'test'")
        exit(0)
Example #11
def filter_retrain(net, dirname):
    checkpoint_path = os.path.join(CHECK_POINT_PATH, args.net)
    time = str(datetime.date.today())
    most_recent_path = os.path.join(checkpoint_path, 'retrain', "most_recent",
                                    dirname)
    if not os.path.exists(most_recent_path):
        os.makedirs(most_recent_path)
    retrain_checkpoint_path = os.path.join(checkpoint_path, 'retrain', time,
                                           dirname)
    if not os.path.exists(retrain_checkpoint_path):
        os.makedirs(retrain_checkpoint_path)

    train_loader = get_train_loader(args)
    test_loader = get_test_loader(args)

    net = training(net, 20, train_loader, test_loader, True, 0.001, 'SGD',
                   retrain_checkpoint_path, most_recent_path)
    return net
Example #12
def run_experiment(epochs, model_name, training_type, configs):
    """ Runs the basic experiment"""

    print(epochs, "CONFIGS: ", configs)

    # set seed for reproducibility.
    seed = configs.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # gpu training specific seed settings.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # load data
    loaders = load_dataset(configs)

    # load model
    model = load_model(model_name, training_type, configs)

    # loss
    criterion = nn.CrossEntropyLoss().cuda()

    # optimizer
    # optimizer = optim.SGD(model.parameters(), configs.lr,
    #                       momentum=configs.momentum,
    #                       weight_decay=configs.weight_decay)
    optimizer = optim.Adam(model.parameters(), configs.lr)

    # get tracking dictionaries
    model_weights, layer_dict = setup_delta_tracking(model)

    # train model
    rmae_delta_dict, train_acc_arr, test_acc_arr = training(
        epochs, loaders, model, optimizer, criterion, model_weights,
        layer_dict, configs)

    return rmae_delta_dict, train_acc_arr, test_acc_arr
Example #13
reviews = createParts(reviews_file.read().splitlines())

accuracy = 0
for i in range(10):
	actual_values = []
	test_set = []
	for review in reviews[i]:
		test_set.append(review[1:])
		actual_values.append(review[0])
	
	training_set = []
	for j in range(10):
		if i != j:
			training_set.extend(reviews[j])
	
	P1, P2, pp, np = training(training_set, vocabulary)
	
#	print P1
#	print P2
#	print pp
#	print np
#	input('training')
	
	result_values = testing(test_set, vocabulary, P1, P2, pp, np)
	
#	print result_values
#	input('result values')

	current_accuracy = getAccuracy(actual_values, result_values)
	print current_accuracy
Example #14
# divide into 90% train / 10% validation on the labeled training set
X_train, X_val, y_train, y_val = train_x[:190000], train_x[
    190000:], y[:190000], y[190000:]
# divide into 90% train / 10% validation on both the labeled and predicted-labeled training set
#X_train, X_val, y_train, y_val = train_x[20000:], train_x[:20000], y[20000:], y[:20000]
# divide into 90% train / 10% validation on both the labeled and predicted-labeled training set
#X_train, y_train = train_x[:20000], y[:20000]
#X_val, y_val = train_x[190000:200000], y[190000:200000]
#X_train = torch.cat((X_train, train_x[200000:]), 0)
#y_train = torch.cat((y_train, y[200000:]), 0)

# to dataset
train_dataset = TwitterDataset(X=X_train, y=y_train)
val_dataset = TwitterDataset(X=X_val, y=y_val)

# to dataloader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=8)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         num_workers=8)

# training
training(batch_size, epoch, lr, model_dir, train_loader, val_loader, model,
         device)
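Examples #14 and #28 wrap the split tensors in a TwitterDataset before building DataLoaders, but the class itself is not shown. A minimal, hypothetical map-style Dataset with the same constructor signature could look like this; the original implementation may differ.

from torch.utils.data import Dataset


class TwitterDataset(Dataset):
    # hypothetical minimal version of the dataset wrapped by the DataLoaders above
    def __init__(self, X, y=None):
        self.data = X    # tensor of token-id sequences
        self.label = y   # tensor of labels, or None at inference time

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        if self.label is None:
            return self.data[idx]
        return self.data[idx], self.label[idx]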
Example #15
from __future__ import division
from train import training
from test import testing

filtbankN = 13

codebooks = training(filtbankN)

testing(codebooks, filtbankN)
Example #16
        if args.low_ratio == 0:
            print('\nTraining High Resolution images')
            print('\t on ',args.dataset.upper(),' dataset, with hyper parameters above')
            print('\nTraining starts')

            logger = getlogger(args.log_dir + '/DATASET_{}_HIGH_RES'.format(args.dataset))
            for arg in vars(args):
                logger.debug('{} - {}'.format(str(arg), str(getattr(args, arg))))
            logger.debug('\nTraining High Resolution images')
            logger.debug('\t on '+args.dataset.upper()+' dataset, with hyper parameters above\n\n')

            training(net, optimizer,
                     args.lr, args.lr_decay, args.epochs, args.ten_batch_eval,
                     train_loader, eval_train_loader, eval_validation_loader, num_training, num_validation,
                     args.low_ratio, args.result,
                     logger,
                     args.vgg_gap,
                     args.save,
                     args.writer
                     )
        else:
            print('\nTraining Low Resolution images')
            print('\t on ',args.dataset,' with hyper parameters above')
            print('\tLow resolution scaling = {} x {}'.format(args.low_ratio, args.low_ratio))
            print('\nTraining starts')

            logger = getlogger(args.log_dir + '/DATASET_{}_LOW_{}'.format(args.dataset, str(args.low_ratio)))
            for arg in vars(args):
                logger.debug('{} - {}'.format(str(arg), str(getattr(args, arg))))
            logger.debug('\nTraining Low Resolution images, Low resolution of {}x{}'.format(str(args.low_ratio), str(args.low_ratio)))
            logger.debug('\t on '+args.dataset.upper()+' dataset, with hyper parameters above\n\n')
Example #17
from train import training
import pickle

reviewed_file = open('new_data.txt', 'r')
vocabulary_file = open('vocabulary.txt', 'r')
model_file = open('model', 'wb')

reviews = reviewed_file.read().splitlines()
vocabulary = vocabulary_file.read().splitlines()

(P1, P2, pp, np) = training(reviews, vocabulary)
a = (P1, P2, vocabulary, pp, np)

pickle.dump(a, model_file)
model_file.close()
Example #18
def run(args, env, agent, ram, env_params):
    if(args.mode == 'train'):
        training(args, env, agent, ram, env_params)
    else:
        evaluating(args, env, agent, args.max_episodes - 1)
Example #19
from __future__ import division
import numpy as np
from scipy.io.wavfile import read
from LBG import EUDistance
from MFCC_algorithm import mfcc
from train import training
import os

#total number of speakers and filters required
totalspeakers = 4
nfilters = 16
#assigning the location of testing data
directory = os.getcwd() + '/test'
fname = str()
codebooks = training(nfilters)

#counter to count the number of speakers correctly identified
nCorrect_MFCC = 0


#calculating the minimum distance between neighbours
def minDistance(features, codebooks):
    speaker = 0
    distmin = np.inf
    for k in range(np.shape(codebooks)[0]):
        D = EUDistance(features, codebooks[k, :, :])
        dist = np.sum(np.min(D, axis=1)) / (np.shape(D)[0])
        if dist < distmin:
            distmin = dist
            speaker = k

    return speaker
Example #20
    start_model_rngstate_path = os.path.splitext(
        args.start_model)[0] + '_rngstate.pth'
    if os.path.exists(start_model_rngstate_path):
        torch.set_rng_state(
            torch.load(start_model_rngstate_path)['torch_rng_state'])
        torch.cuda.set_rng_state_all(
            torch.load(start_model_rngstate_path)['cuda_rng_state'])
        logging('-' * 30, f_log=f_log)
        logging('Loading saved rng states.', f_log=f_log)
        logging('-' * 30, f_log=f_log)

train_ppl, val_ppl = training(
    train_iter,
    val_iter,
    args.epochs,
    LMModel,
    optimizer,
    scheduler,
    args.gradclip,
    args.save,
    ##                              shard_size=args.shardsz,
    LMModel_parallel=LMModel_parallel,
    f_log=f_log)
##                              subvocab_size=args.subvocabsz)

######### test the trained model
##test_ppl = validating(test_iter, LMModel, shard_size=args.shardsz, LMModel_parallel=LMModel_parallel, f_log=f_log)
test_ppl = validating(test_iter,
                      LMModel,
                      LMModel_parallel=LMModel_parallel,
                      f_log=f_log)
logging('-' * 30, f_log=f_log)
logging('Test ppl: %f' % test_ppl, f_log=f_log)
Example #21
	bivocab = file2.read().splitlines()

	accuracy = 0
	for i in range(10):
		actual_values = []
		test_set = []
		for review in reviews[i]:
			test_set.append(review[1:])
			actual_values.append(review[0])
		
		training_set = []
		for j in range(10):
			if i != j:
				training_set.extend(reviews[j])
		
		P1, P2, upp, unp, bpp, bnp = training(training_set, univocab, bivocab)
		
	#	print P1
	#	print P2
	#	print pp
	#	print np
	#	input data('training')
		
		result_values = testing(test_set, univocab, bivocab, P1, P2, upp, unp, bpp, bnp)
		
	#	print result_values
	#	input data('result values')

		current_accuracy = getAccuracy(actual_values, result_values)
		print current_accuracy
Example #22
def run_all(RESOLUTION):
    convert(RESOLUTION)
    model = training(RESOLUTION)
    predictions(RESOLUTION)
Example #23
while True:
    print("1) Resize and convert images to array\n2) Train Model\n"
          "3) Load Model\n4) Test Model\n5) Run all\n6) Quit\n")
    choice = int(input("Choice: "))
    choice_dict = {
        1: convert,
        2: training,
        3: load_model,
        4: predictions,
        5: run_all,
        6: exit
    }

    if choice == 2:
        model = training(RESOLUTION)
    elif choice == 3:
        name = input("Enter model name: ")
        print("Loading model..." + name)
        try:
            model = load_model(name,
                               custom_objects={
                                   'smooth_l1_loss': smooth_l1_loss,
                                   'my_metric': my_metric
                               })
        except:
            print("\nError in loading model\n")

    else:
        choice_dict[choice](RESOLUTION)
Example #24

if __name__ == "__main__":
    # Configuration
    config = configparser.ConfigParser()
    config.read('config/config.ini')
    # Read parameters
    training_path = config['BASE']['training_path']
    validation_path = config['BASE']['validation_path']
    input_size = int(config['TRAINING']['input_size'])
    batch_size = int(config['TRAINING']['batch_size'])
    max_epoch = int(config['TRAINING']['max_epoch'])
    learning_rate = float(config['TRAINING']['learning_rate'])

    # Read the image data and store it in numpy arrays
    training_x, training_y = common_util.read_data(training_path, True, input_size)
    validation_x, validation_y = common_util.read_data(validation_path, True, input_size)

    # Load the image data
    training_loader = ImageDataSet(training_x, training_y, TRAINING_TRANSFORM).get_loader(batch_size)
    validation_loader = ImageDataSet(validation_x, validation_y, TESTING_TRANSFORM).get_loader(batch_size, False)

    # Train the model
    model = CNN128().cuda()
    loss = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    train = train.Train(model, loss, optimizer)
    train.training(training_loader, validation_loader, max_epoch)
    train.plot()
    pass
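Example #24 reads its settings from config/config.ini, which is not included. Based on the keys the script accesses, the file presumably contains BASE and TRAINING sections like the ones generated by this sketch; the paths and values are placeholders. Writing it via configparser keeps the sketch in Python.

import configparser
import os

# assumed layout of config/config.ini, inferred from the keys read in Example #24
config = configparser.ConfigParser()
config['BASE'] = {
    'training_path': 'data/training',        # placeholder path
    'validation_path': 'data/validation',    # placeholder path
}
config['TRAINING'] = {
    'input_size': '128',
    'batch_size': '64',
    'max_epoch': '30',
    'learning_rate': '0.001',
}
os.makedirs('config', exist_ok=True)
with open('config/config.ini', 'w') as f:
    config.write(f)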
Example #25
    neg_dict = get_matrices.get_entries(neg_entries, -1)
    pos_dict = get_matrices.get_entries(pos_entries, 1)
    product_feature_matrix = get_matrices.get_product_feature_matrix(product_dict, product_index, lookup_dict, aspect_index, 5,
                                                        neg_dict, pos_dict)
    product_feature_matrix2 = get_matrices.get_product_feature_matrix(product_dict2, product_index2, lookup_dict, aspect_index, 5,
                                                        neg_dict, pos_dict)
    #product_feature_matrix3 = get_matrices.get_product_feature_matrix(product_dict3, product_index3, lookup_dict, aspect_index, 5,
                                                        #neg_dict, pos_dict)
    #product_feature_matrix4 = get_matrices.get_product_feature_matrix(product_dict4, product_index4, lookup_dict, aspect_index, 5,
                                                        #neg_dict, pos_dict)
    #u_id = user_index["A10UHQH1YL5Q6B"]
    #p_id = product_index["B00HH7MAUW"]
    #p_id = product_index["B00BGG5LO2"]
    #user_item_matrix[u_id, p_id] = 0
    [U1, U2, V, H1, H2] = train.training(user_item_matrix, user_feature_matrix,
                                         product_feature_matrix, 50, 50, 0.01,
                                         0.01, 0.01, 0.01, 0.01, 5000, 0.002)
    #[U1x, U2x, Vx, H1x, H2x] = train.training(user_item_matrix2, user_feature_matrix2, product_feature_matrix2, 50, 50, 0.01, 0.01, 0.01, 0.01, 0.01, 5000, 0.002)
    #[U1y, U2y, Vy, H1y, H2y] = train.training(user_item_matrix3, user_feature_matrix3, product_feature_matrix3, 50, 50, 0.01, 0.01, 0.01, 0.01, 0.01, 5000, 0.002)
    #[U1z, U2z, Vz, H1z, H2z] = train.training(user_item_matrix4, user_feature_matrix4, product_feature_matrix4, 50, 50, 0.01, 0.01, 0.01, 0.01, 0.01, 5000, 0.002)


    X_ = U1.dot(V.T)
    Y_ = U2.dot(V.T)
    A_ = U1.dot(U2.T) + H1.dot(H2.T)
    '''
    X_x = U1x.dot(Vx.T)
    Y_x = U2x.dot(Vx.T)
    A_x = U1x.dot(U2x.T) + H1x.dot(H2x.T)

    X_y = U1y.dot(Vy.T)
    Y_y = U2y.dot(Vy.T)
Example #26
def reading():
    '''
    Reading the dataset
    '''
    df = pd.read_csv('database.csv',
                     usecols=[
                         *range(0, 10000), *range(400000, 410000),
                         *range(790000, 800003)
                     ])
    print(df.head())

    return df


if __name__ == "__main__":
    print("Carregando os dados...")
    df = reading()

    print("\nAnálise da estrutura dos dados: ")
    analysis(df)

    print("\nTransformações necessárias no dataset: ")
    df_train, df_test = transform(df)

    print("\nTreinando o modelo...: ")
    prediction, y_test = training(df_train, df_test)

    print("\nAnálise de resultados: ")
    results(y_test, prediction)
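Example #26 assumes a training(df_train, df_test) helper that fits a model and returns (prediction, y_test). A hypothetical sketch of that contract is shown below; the target column name and the choice of classifier are assumptions.

from sklearn.linear_model import LogisticRegression


def training(df_train, df_test, target='label'):
    # hypothetical training(): fit on df_train, return predictions and true labels for df_test
    X_train = df_train.drop(columns=[target])
    y_train = df_train[target]
    X_test = df_test.drop(columns=[target])
    y_test = df_test[target]

    model = LogisticRegression(max_iter=1000)
    model.fit(X_train, y_train)
    prediction = model.predict(X_test)
    return prediction, y_test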
Example #27
import torch
import argparse
import data_proc
import train

parser = argparse.ArgumentParser()
parser.add_argument('--cuda', type=str, default='cuda:6')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_epoch', type=int, default=50)
parser.add_argument('--lr', type=float, default=0.0004)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--ckp', type=str, default='ckp/model_3.pt')
parser.add_argument('--max_acc', type=float, default=0.5)
args = parser.parse_args()

if torch.cuda.is_available():
    print("using cuda......")
    device = torch.device(args.cuda)

w2v_model, train_iter, dev_iter, test_iter = data_proc.data(
    device, args.batch_size)
train.training(device, w2v_model, train_iter, dev_iter, test_iter,
               args.batch_size, args.num_epoch, args.lr, args.weight_decay,
               args.ckp, args.max_acc)
Example #28
y = preprocess.labels_to_tensor(y)

# Define the model
model = LSTM_Net(embedding,
                 embedding_dim=250,
                 hidden_dim=150,
                 num_layers=1,
                 dropout=0.5,
                 fix_embedding=fix_embedding)
model = model.to(device)

# Hold out part of the training set as a validation set
X_train, X_val, y_train, y_val = train_x[:180000], train_x[
    180000:], y[:180000], y[180000:]

# Load the data
train_dataset = TwitterDataset(X=X_train, y=y_train)
val_dataset = TwitterDataset(X=X_val, y=y_val)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=8)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         num_workers=8)

# Start training
training(batch_size, epoch, lr, './data', train_loader, val_loader, model,
         device)
Example #29
    print('vocab_size is :', len(vocab))
    train_corpus_ids = train_corpus.tokenize(
        vocab)  # contains all tokenized corpus
    train_set = CorpusDataset(train_corpus_ids)
    train_dl = DataLoader(train_set,
                          batch_size=config.batch_size,
                          shuffle=True,
                          collate_fn=collate_fn)

    # todo load test dataset

    # ---- model&optim&etc... ---- #
    model = GatedCNN(vocab_size=len(vocab),
                     embed_dim=config.embed_dim,
                     kernel_width=config.kernel_width,
                     out_channel=config.out_channel,
                     n_layers=config.n_layers,
                     res_block_cnt=config.res_block_cnt,
                     dropout=config.dropout)
    if config.use_cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.CrossEntropyLoss(
        ignore_index=0)  # very important to set ignore_index to 0

    # train
    print('start time is', get_time_dif(start_time))
    loss = training(model, optimizer, criterion, train_dl)
    print('loss for this epoch is', loss)
    print('time used for this epoch', get_time_dif(start_time))
Example #30
teacher_net1 = VGG16().cuda()
teacher_net1.load_state_dict(torch.load('model/vgg16.model'))
teacher_net2 = VGG13().cuda()
teacher_net2.load_state_dict(torch.load('model/vgg13.model'))
# teacher_net3 = VGG19().cuda()
# teacher_net3.load_state_dict(torch.load('teacher_model/vgg19.model'))
teacher_net.append(teacher_net1)
teacher_net.append(teacher_net2)
# teacher_net.append(teacher_net3)
student_net = StudentNet(base=16).cuda()
#student_net = MobileNetV2(n_class=11).cuda()

print('Start Training')
training(teacher_net,
         student_net,
         train_loader,
         val_loader,
         test_loader,
         total_epoch=400)
models = []

# Deep Mutual Learning
# model1 = VGG16().cuda()
# model1.load_state_dict(torch.load('teacher_model/vgg16.model'))
# model2 = VGG13().cuda()
# model2.load_state_dict(torch.load('teacher_model/vgg13.model'))
# model3 = StudentNet(base=16).cuda()
# model3.load_state_dict(torch.load('student_model.bin'))

# # models.append(model1)
# models.append(model2)
# models.append(model3)
Example #31
from __future__ import division
import numpy as np
from scipy.io.wavfile import read
from LBG import EUDistance
from mel_coefficients import mfcc
from LPC import lpc
from train import training
import os

nSpeaker = 8
nfiltbank = 12
orderLPC = 15
(codebooks_mfcc, codebooks_lpc) = training(nfiltbank, orderLPC)
directory = os.getcwd() + '/test'
fname = str()
nCorrect_MFCC = 0
nCorrect_LPC = 0


def minDistance(features, codebooks):
    speaker = 0
    distmin = np.inf
    for k in range(np.shape(codebooks)[0]):
        D = EUDistance(features, codebooks[k, :, :])
        dist = np.sum(np.min(D, axis=1)) / (np.shape(D)[0])
        if dist < distmin:
            distmin = dist
            speaker = k

    return speaker
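Examples #19 and #31 define minDistance, but the test loop that uses it is cut off. The sketch below shows how the MFCC branch of the speaker-identification loop presumably proceeds, reusing the names defined in Example #31; the test-file naming scheme and the mfcc(signal, rate, nfiltbank) signature are assumptions.

# hypothetical MFCC test loop built on the names defined above
for i in range(nSpeaker):
    fname = '/s' + str(i + 1) + '.wav'            # assumed test-file naming scheme
    (fs, s) = read(directory + fname)             # scipy.io.wavfile.read
    mel_coefs = mfcc(s, fs, nfiltbank)            # assumed signature: (signal, rate, n filters)
    sp_mfcc = minDistance(mel_coefs, codebooks_mfcc)
    print('Speaker', i + 1, 'identified as speaker', sp_mfcc + 1, '(MFCC)')
    if i == sp_mfcc:
        nCorrect_MFCC += 1

print('MFCC identification accuracy:', 100.0 * nCorrect_MFCC / nSpeaker, '%')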
Example #32
                         args.n_mode - 1, p_path + '/train')
    tr_batch = tr_bc.db_load()

    val_path = glob(p_path + '/validation/**')
    val_batch = []
    for path in val_path:
        val_bc = Create_Batch(args.batch_size, int(args.patch_size / 2),
                              args.n_mode - 1, path)
        val_batch.append(val_bc.db_load())

    # Training & Validation
    cnt = 1
    for ep in range(args.n_epoch):

        # Training
        models, cnt = training(args, tr_batch, models, loss_func, optimizer,
                               cnt, model_path)

        # Validation on the training set
        validation(args, tr_batch, models, ep)

        # Validation
        for b in val_batch:
            validation(args, b, models, ep)

else:

    # Real MR data test (Segmentation)
    if args.data_name == 'YS':
        test_bc = Create_Batch(args.batch_size, int(args.patch_size / 2),
                               args.n_mode - 1, p_path + '/test_ys/0')
        test_batch = test_bc.db_load()