Example #1
def train(epoch):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(train_loader.dataset) // c.batch_size)
    for num_iter, batch in enumerate(train_loader):
        start_time = time.time()
        wav = batch[0].unsqueeze(1)
        mel = batch[1].transpose(1, 2)
        lens = batch[2]
        target = batch[3]
        if use_cuda:
            wav = wav.cuda()
            mel = mel.cuda()
            target = target.cuda()
        current_step = num_iter + epoch * len(train_loader) + 1
        optimizer.zero_grad()
        out = model(wav, mel)
        loss, fp, tp = criterion(out, target, lens)
        loss.backward()
        grad_norm, skip_flag = check_update(model, 5, 100)
        if skip_flag:
            optimizer.zero_grad()
            print(" | > Iteration skipped!!")
            continue
        optimizer.step()
        step_time = time.time() - start_time
        epoch_time += step_time
        # update
        progbar.update(num_iter+1, values=[('total_loss', loss.item()),
                                           ('grad_norm', grad_norm.item()),
                                           ('fp', fp),
                                           ('tp', tp)
                                          ])
        avg_loss += loss.item()
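Both training loops on this page call a `check_update` helper that is not shown. A minimal sketch of what it presumably does, assuming the two positional arguments (here 5 and 100) are a gradient-clipping threshold and a skip threshold:

import torch

def check_update(model, clip_threshold, skip_threshold):
    # Clip the global gradient norm, then flag the step for skipping
    # if the (pre-clip) norm is non-finite or exploded.
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_threshold)
    skip_flag = (not torch.isfinite(grad_norm)) or grad_norm.item() > skip_threshold
    return grad_norm, skip_flag

With this shape, `grad_norm` is a tensor, which matches the `grad_norm.item()` call in the progress-bar update above.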
Example #2
def evaluate(epoch, ema):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(val_loader.dataset) // c.eval_batch_size)
    ema_model = FFTNetModel(hid_channels=256,
                            out_channels=256,
                            n_layers=c.num_quant,
                            cond_channels=80)
    ema_model = ema.assign_ema_model(model, ema_model, use_cuda)
    ema_model.eval()
    with torch.no_grad():
        for num_iter, batch in enumerate(val_loader):  # evaluate on the validation set
            start_time = time.time()
            wav = batch[0].unsqueeze(1)
            mel = batch[1].transpose(1, 2)
            lens = batch[2]
            target = batch[3]
            if use_cuda:
                wav = wav.cuda()
                mel = mel.cuda()
                target = target.cuda()
            current_step = num_iter + epoch * len(train_loader) + 1
            out = ema_model(wav, mel)
            loss, fp, tp = criterion(out, target, lens)
            step_time = time.time() - start_time
            epoch_time += step_time
            # update
            progbar.update(num_iter + 1,
                           values=[('total_loss', loss.item()), ('fp', fp),
                                   ('tp', tp)])
            avg_loss += loss.item()
Example #3
def evaluate(epoch):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(val_loader.dataset) // c.eval_batch_size)
    model.eval()
    with torch.no_grad():
        for num_iter, batch in enumerate(val_loader):  # evaluate on the validation set
            start_time = time.time()
            wav = batch[0].unsqueeze(1)
            mel = batch[1].transpose(1, 2)
            lens = batch[2]
            target = batch[3]
            if use_cuda:
                wav = wav.cuda()
                mel = mel.cuda()
                target = target.cuda()
            current_step = num_iter + epoch * len(train_loader) + 1
            out = model(wav, mel)
            loss, fp, tp = criterion(out, target, lens)
            step_time = time.time() - start_time
            epoch_time += step_time
            # update
            # no backward pass here, so there is no grad_norm to report
            progbar.update(num_iter+1, values=[('total_loss', loss.item()),
                                               ('fp', fp),
                                               ('tp', tp)
                                              ])
            avg_loss += loss.item()
Example #4
def dl_progress(count, block_size, total_size):
    global progbar
    if total_size < 1000000:  # skip the bar for very small downloads
        return
    if progbar is None:
        progbar = Progbar(total_size)
    else:
        progbar.update(count * block_size)
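This hook matches the reporthook signature of `urllib.request.urlretrieve`, which invokes it as `(block count, block size, total size)` after each chunk. A usage sketch, with a placeholder URL and a Keras-style `Progbar` import assumed:

from urllib.request import urlretrieve
from keras.utils import Progbar  # assumed source of Progbar

progbar = None  # module-level; dl_progress creates the bar on its first call
urlretrieve('https://example.com/dataset.tar.gz', 'dataset.tar.gz',
            reporthook=dl_progress)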
Example #5
def train(epoch):
    avg_loss = 0.0
    epoch_time = 0
    progbar = Progbar(len(train_loader.dataset) // c.batch_size)
    if c.ema_decay > 0:
        ema = EMA(c.ema_decay)
        for name, param in model.named_parameters():
            if param.requires_grad:
                ema.register(name, param)
    else:
        ema = None
    model.train()
    for num_iter, batch in enumerate(train_loader):
        start_time = time.time()
        wav = batch[0].unsqueeze(1)
        mel = batch[1].transpose(1, 2)
        lens = batch[2]
        target = batch[3]
        if use_cuda:
            wav = wav.cuda()
            mel = mel.cuda()
            target = target.cuda()
        current_step = num_iter + epoch * len(train_loader) + 1
        optimizer.zero_grad()
        # out = torch.nn.parallel.data_parallel(model, (wav, mel))
        out = model(wav, mel)
        loss, fp, tp = criterion(out, target, lens)
        loss.backward()
        grad_norm, skip_flag = check_update(model, 5, 100)
        if skip_flag:
            optimizer.zero_grad()
            print(" | > Iteration skipped!!")
            continue
        optimizer.step()
        # model ema
        if ema is not None:
            for name, param in model.named_parameters():
                if name in ema.shadow:
                    ema.update(name, param.data)
        step_time = time.time() - start_time
        epoch_time += step_time
        # update
        progbar.update(num_iter + 1,
                       values=[('total_loss', loss.item()),
                               ('grad_norm', grad_norm.item()), ('fp', fp),
                               ('tp', tp)])
        avg_loss += loss.item()
    return ema, avg_loss
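The `EMA` class used here (and consumed by `evaluate` in Example #2) is not part of this listing. A minimal sketch that fits the `register`/`update`/`assign_ema_model` calls above, assuming standard exponential smoothing of parameter tensors:

class EMA:
    """Exponential moving average of model parameters."""

    def __init__(self, decay):
        self.decay = decay
        self.shadow = {}

    def register(self, name, param):
        # Store an initial shadow copy of the parameter.
        self.shadow[name] = param.data.clone()

    def update(self, name, value):
        # shadow = decay * shadow + (1 - decay) * value
        self.shadow[name] = (self.decay * self.shadow[name]
                             + (1.0 - self.decay) * value)

    def assign_ema_model(self, model, ema_model, use_cuda):
        # Copy the live weights into ema_model, then overwrite the
        # registered ones with their shadow averages.
        ema_model.load_state_dict(model.state_dict())
        for name, param in ema_model.named_parameters():
            if name in self.shadow:
                param.data.copy_(self.shadow[name])
        if use_cuda:
            ema_model = ema_model.cuda()
        return ema_model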
Example #6
def mainmodel(dro=0.4, lr=0.00005, threshold=0.5, fn_weights=1.0, model=modeltype, mini_batch=20, num_epochs=500, preload=preload):
    print("model:%s minibatch:%d num_epochs:%d dropout:%f learningrate:%f threshold:%f fn_weights:%f\n"
          % (model, mini_batch, num_epochs, dro, lr, threshold, fn_weights))

    print('Preprocessing data...')
    for i in range(X_train.shape[0]):
        X_train[i,0] = preprocessing.scale(X_train[i,0])
        X_train[i,1] = preprocessing.scale(X_train[i,1])
        X_train[i,2] = preprocessing.scale(X_train[i,2])

    for i in range(X_val.shape[0]):
        X_val[i,0] = preprocessing.scale(X_val[i,0])
        X_val[i,1] = preprocessing.scale(X_val[i,1])
        X_val[i,2] = preprocessing.scale(X_val[i,2])

    print('The shape of the training data is ' + str(X_train.shape))
    print('The shape of the validation data is ' + str(X_val.shape))
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('input')
    target_var = T.matrix('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")

    # add input_var when needed in transfer learning model
    network = build_model(input_var=input_var,dro=dro)
 
    if preload:
        if model=='vgg':
            read_model_param(network['fc7'],modelloaddir, preload)
        else:
            read_model_param(network['pool5/7x7_s1'],modelloaddir, preload)
        print('pretrained model loaded')
    start_time = time.time()
    networkout = network['prob']

    prediction = lasagne.layers.get_output(networkout)
    # disable dropout
    test_prediction = lasagne.layers.get_output(networkout, deterministic=True)

    # loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = fas_neg_entrop(prediction, target_var, fn_weights)
    loss = loss.mean()

    params = lasagne.layers.get_all_params(networkout, trainable=True)
    updates = lasagne.updates.adagrad(
            loss, params, learning_rate=lr, epsilon=1e-06)

    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    getpred_fn = theano.function([input_var], prediction)

    getpred_test_fn = theano.function([input_var], test_prediction)

    print("Starting training...")
    print("Compile time: {:.1f}s".format(time.time() - start_time))
    # We iterate over epochs:
    # log the accuracy of each epoch
    fout = open('../log/xin_log_dro_' + str(dro) +'_lr_'+ str(lr) +'_ts_'+ str(threshold) + '.log', 'w+')
    
    list_sort = []  # entries: [accuracy, precision, positive recall, negative recall, epoch]

    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        fout.write('Epoch'+str(epoch)+'\n')
        print('Epoch',epoch)

        train_batches = 0
        progbarcount = 0
        progbar=Progbar(30)

        # log the probabilities of prediction in each epoch
        output_path = '../log/prediction_epoch_' + str(epoch) + '.out'
        output_path_train = '../log/train_epoch_' + str(epoch) + '.out'

        with open(output_path_train, 'wb') as outfile_train:
            for batch in iterate_minibatches(X_train, Y_train, mini_batch, shuffle=True):
                inputs, targets = batch
                # reshape to fit the loss function calculation
                targets = targets.reshape((targets.shape[0],1))
                progbarcount = progbarcount + float(len(targets))/len(Y_train)*25

                batch_pred_train = getpred_fn(inputs)

                if train_batches == 0:
                    pred_train = batch_pred_train
                    y_label_train = targets
                    train_batches += 1
                else:
                    pred_train = np.concatenate((pred_train,batch_pred_train))
                    y_label_train = np.concatenate((y_label_train,targets))
                    train_batches += 1

                np.savetxt(outfile_train, batch_pred_train, fmt='%-7.2f')

                err = train_fn(inputs, targets)
        # outfile_train is closed automatically by the with-block
        progbar.update(progbarcount,values=[('train_batches',train_batches)])              

        # And a full pass over the validation data:

        val_batches = 0


        with open(output_path, 'wb') as outfile:
            pred_val = []
            for batch in iterate_minibatches(X_val, Y_val, np.min([mini_batch,len(Y_val)]), shuffle=True):
                inputs, targets = batch
                batch_pred_val = getpred_test_fn(inputs)
                if val_batches == 0:
                    pred_val = batch_pred_val
                    y_label_val = targets
                else:
                    pred_val = np.concatenate((pred_val,batch_pred_val))
                    y_label_val = np.concatenate((y_label_val,targets))

                np.savetxt(outfile, batch_pred_val, fmt='%-7.2f')
                val_batches += 1
                
        # outfile is closed automatically by the with-block
        progbar.update(30,values=[('val_batches',val_batches)])

        ## For softmax
        # train_acc = get_acc(y_label_train, pred_train[:,1], threshold)
        # val_acc = get_acc(y_label_val, pred_val[:,1], threshold)

        # For sigmoid
        train_acc = get_acc(y_label_train, pred_train, threshold)
        val_acc = get_acc(y_label_val, pred_val, threshold)

        # img_save_path = '../log/curve_epoch_' + str(epoch) + '.png'
        # plot_recall_curve(y_label_val,pred_val,img_save_path)

        # all_param_values = lasagne.layers.get_all_param_values(networkout)


        print('Accuracy       : {:.2f}%  {:.2f}%'.format(100*train_acc[0][0], 100*val_acc[0]))
        print('Precision      : {:.2f}%  {:.2f}%'.format(100*train_acc[0][3], 100*val_acc[3]))
        print('Positive Recall: {:.2f}%  {:.2f}%'.format(100*train_acc[0][1], 100*val_acc[1]))
        print('Negative Recall: {:.2f}%  {:.2f}%'.format(100*train_acc[0][2], 100*val_acc[2]))

        if epoch == 0:
            list_sort.append([val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
        saveflag = True
        i = 0
        while i < len(list_sort) and i < 30:
            if 1.0 == val_acc[1] and val_acc[2] < 0.1:
                saveflag = False
                break
            elif val_acc[1] > list_sort[i][2] and abs(train_acc[0][0]-val_acc[0])> 0.05 and train_acc[0][0] < 0.99:
                list_sort.insert(i, [val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
                break
            elif val_acc[1] == list_sort[i][2] and val_acc[2] > list_sort[i][3] and \
            abs(train_acc[0][0]-val_acc[0])> 0.05 and train_acc[0][0] < 0.99:
                list_sort.insert(i, [val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
                break
            else:
                i += 1
        if i == 30:
            saveflag = False

            
        fout.write('Accuracy       : {:.2f} %\n'.format(100*train_acc[0][0]))
        fout.write('Precision      : {:.2f} %\n'.format(100*train_acc[0][3]))
        fout.write('Positive Recall: {:.2f} %\n'.format(100*train_acc[0][1]))
        fout.write('Negative Recall: {:.2f} %\n'.format(100*train_acc[0][2]))

        fout.write('Val Accuracy       : {:.2f} %\n'.format(100*val_acc[0]))
        fout.write('Val Precision      : {:.2f} %\n'.format(100*val_acc[3]))
        fout.write('Val Positive Recall: {:.2f} %\n'.format(100*val_acc[1]))
        fout.write('Val Negative Recall: {:.2f} %\n'.format(100*val_acc[2]))
        
        if saveflag:
            saveto = ('1w_zzz_jjy_qt_SavedModels_Epoch_' + str(epoch) +
                     '_dro_' + str(dro) +'_lr_'+ str(lr) +'_ts_'+ str(threshold)+ '.params')
            write_model_param(networkout,modelsavedir,saveto)
                     
        

    # After training, print the best epochs recorded in list_sort:
    for one in list_sort[:30]:
        print(one)
    fout.close()
    print ("Training Completed!")
    return 0
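`iterate_minibatches` is assumed by all the Theano/Lasagne examples on this page. A sketch along the lines of the classic Lasagne-tutorial helper, yielding `(inputs, targets)` slices of `batchsize` items (the final partial batch is dropped):

import numpy as np

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]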
Example #7
def mainmodel(model=modeltype,
              mini_batch=20,
              num_epochs=30,
              dro=0.7,
              lr=0.0001,
              preload=preload,
              saveto=saveto):
    print("model:%s minibatch:%d num_epochs:%d dropout:%f learingrate:%f\n" %
          (model, mini_batch, num_epochs, dro, lr))

    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_data()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('input')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")

    ## add input_var when needed in transfer learning model
    network = build_model(input_var=input_var, dro=dro)

    if preload:
        #        with open(preload, 'r') as f:
        #            data = pickle.load(f)
        #        print(data)
        #        lasagne.layers.set_all_param_values(model, data)
        if model == 'vgg':
            read_model_param(network['fc7'], modelloaddir, preload)
        else:
            read_model_param(network['pool5/7x7_s1'], modelloaddir, preload)
        print('pretrained model loaded')
    start_time = time.time()
    networkout = network['prob']
    #networkout=network
    prediction = lasagne.layers.get_output(networkout)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
                 dtype=theano.config.floatX)

    params = lasagne.layers.get_all_params(networkout, trainable=True)
    updates = lasagne.updates.adagrad(loss,
                                      params,
                                      learning_rate=lr,
                                      epsilon=1e-06)

    test_prediction = lasagne.layers.get_output(networkout, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(
        test_prediction, target_var)
    test_loss = test_loss.mean()

    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    train_fn = theano.function([input_var, target_var], [loss, acc],
                               updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    print("Compile time: {:.1f}s".format(time.time() - start_time))
    # We iterate over epochs:
    train_accuracy = []
    train_losses = []
    val_accuracy = []
    val_losses = []
    time_id = str(int(time.time()))
    #outfilename='./benchmark/'+model+'_'+str(mini_batch)+'_'+str(num_epochs)+'_'+str(dro)+'_'+str(lr)+'_'+time_id+'.txt'
    outfilename = './benchmark/' + model + '_' + str(mini_batch) + '_' + str(
        num_epochs) + '_' + str(dro) + '_' + str(lr) + '.txt'
    print(outfilename)
    outfile = open(outfilename, 'w')
    #outfile.write("model:%s mini_batch:%s num_epochs:%s dro:%s learningrate:%s\n" % (model,mini_batch,num_epochs,dro,lr))
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        print('Epoch', epoch)
        train_err = 0
        train_acc = 0
        train_batches = 0
        progbarcount = 0
        progbar = Progbar(30)
        for batch in iterate_minibatches(X_train,
                                         y_train,
                                         mini_batch,
                                         shuffle=True):
            inputs, targets = batch
            progbarcount = progbarcount + np.float(
                len(targets)) / len(y_train) * 25
            err, acc = train_fn(inputs, targets)
            train_err += err
            train_acc += acc
            train_batches += 1
            #print(train_batches)
            progbar.update(progbarcount,
                           values=[('acc', round(train_acc / train_batches,
                                                 3)),
                                   ('loss', round(train_err / train_batches,
                                                  3))])

        train_accuracy.append(round(train_acc / train_batches, 3))
        train_losses.append(round(train_err / train_batches, 3))
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val,
                                         y_val,
                                         np.min([mini_batch,
                                                 len(y_val)]),
                                         shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        #print(val_acc,val_err,val_batches)
        progbar.update(30,
                       values=[('val_acc', round(val_acc / val_batches, 3)),
                               ('val_loss', round(val_err / val_batches, 3))])
        val_accuracy.append(round(val_acc / val_batches, 3))
        val_losses.append(round(val_err / val_batches, 3))

    # After training, we compute and print the test error:
    print(val_losses)
    '''
    outfile.write('\n')
    outfile.write('train_acc: ')
    for it in train_accuracy:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('train_loss: ')
    for it in train_losses:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('val_acc: ')
    for it in val_accuracy:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('vall_loss: ')
    for it in val_losses:
        outfile.write('%s,' % it)
    outfile.write('\n')
    '''
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_val,
                                     y_val,
                                     np.min([mini_batch,
                                             len(y_val)]),
                                     shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("")
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))

    #outfile.write('\n')
    #outfile.write("test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
    outfile.write("test accuracy:\t{:.2f}".format(test_acc / test_batches))
    outfile.close()
    write_model_param(networkout, modelsavedir, saveto)
    return (train_err / train_batches, train_acc / train_batches * 100,
            val_err / val_batches, val_acc / val_batches * 100)
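All of these examples drive a Keras-style `Progbar`, whose `update(current, values=...)` both advances the bar to `current` and keeps a running average of each named value. A self-contained illustration, assuming `keras.utils.Progbar`:

import time
from keras.utils import Progbar

progbar = Progbar(100)  # target: 100 steps
for step in range(100):
    time.sleep(0.01)  # stand-in for real work
    # values is a list of (name, value) pairs; each name is averaged
    # over the steps seen so far and printed next to the bar.
    progbar.update(step + 1, values=[('loss', 1.0 / (step + 1))])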
Example #8
def mainmodel(dro=0.4, lr=0.00005, threshold=0.5, fn_weights=1.0, model=modeltype, mini_batch=20, num_epochs=100, preload=preload):
    print("model:%s minibatch:%d num_epochs:%d dropout:%f learningrate:%f threshold:%f fn_weights:%f\n"
          % (model, mini_batch, num_epochs, dro, lr, threshold, fn_weights))

    print('Preprocessing data...')
    for i in range(X_train.shape[0]):
        X_train[i,0] = preprocessing.scale(X_train[i,0])
        X_train[i,1] = preprocessing.scale(X_train[i,1])
        X_train[i,2] = preprocessing.scale(X_train[i,2])

    for i in range(X_val.shape[0]):
        X_val[i,0] = preprocessing.scale(X_val[i,0])
        X_val[i,1] = preprocessing.scale(X_val[i,1])
        X_val[i,2] = preprocessing.scale(X_val[i,2])

    print('The shape of the training data is ' + str(X_train.shape))
    print('The shape of the validation data is ' + str(X_val.shape))
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('input')
    target_var = T.matrix('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")

    # add input_var when needed in transfer learning model
    network = build_model(input_var=input_var,dro=dro)
 
    if preload:
        if model=='vgg':
            read_model_param(network['fc7'],modelloaddir, preload)
        else:
            read_model_param(network['pool5/7x7_s1'],modelloaddir, preload)
        print('pretrained model loaded')
    start_time = time.time()
    networkout = network['prob']

    prediction = lasagne.layers.get_output(networkout)
    # disable dropout
    test_prediction = lasagne.layers.get_output(networkout, deterministic=True)

    # loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = fas_neg_entrop(prediction, target_var, fn_weights)
    loss = loss.mean()

    params = lasagne.layers.get_all_params(networkout, trainable=True)
    updates = lasagne.updates.adagrad(
            loss, params, learning_rate=lr, epsilon=1e-06)

    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    getpred_fn = theano.function([input_var], prediction)

    getpred_test_fn = theano.function([input_var], test_prediction)

    print("Starting training...")
    print("Compile time: {:.1f}s".format(time.time() - start_time))
    # We iterate over epochs:
    # log the accuracy of each epoch
    fout = open('../log/xin_log_dro_' + str(dro) +'_lr_'+ str(lr) +'_ts_'+ str(threshold) + '.log', 'w+')

    list_sort = []  # entries: [accuracy, precision, positive recall, negative recall, epoch]
    list_sort.append([0, 0, 0, 0, 0])  # sentinel entry
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        fout.write('Epoch'+str(epoch)+'\n')
        print('Epoch',epoch)

        train_batches = 0
        progbarcount = 0
        progbar=Progbar(30)

        # log the probabilities of prediction in each epoch
        output_path = '../log/prediction_epoch_' + str(epoch) + '.out'
        output_path_train = '../log/train_epoch_' + str(epoch) + '.out'

        with open(output_path_train, 'wb') as outfile_train:
            for batch in iterate_minibatches(X_train, Y_train, mini_batch, shuffle=True):
                inputs, targets = batch
                # reshape to fit the loss function calculation
                targets = targets.reshape((targets.shape[0],1))
                progbarcount = progbarcount + float(len(targets))/len(Y_train)*25

                batch_pred_train = getpred_fn(inputs)

                if train_batches == 0:
                    pred_train = batch_pred_train
                    y_label_train = targets
                    train_batches += 1
                else:
                    pred_train = np.concatenate((pred_train,batch_pred_train))
                    y_label_train = np.concatenate((y_label_train,targets))
                    train_batches += 1

                np.savetxt(outfile_train, batch_pred_train, fmt='%-7.2f')

                err = train_fn(inputs, targets)
        # outfile_train is closed automatically by the with-block
        progbar.update(progbarcount,values=[('train_batches',train_batches)])              

        # And a full pass over the validation data:

        val_batches = 0


        with open(output_path, 'wb') as outfile:
            pred_val = []
            pred = []
            for batch in iterate_minibatches(X_val, Y_val, np.min([mini_batch,len(Y_val)]), shuffle=False):
                inputs, targets = batch
                batch_pred_val = getpred_test_fn(inputs)
                if val_batches == 0:
                    pred_val = batch_pred_val
                    y_label_val = targets
                else:
                    pred_val = np.concatenate((pred_val,batch_pred_val))
                    y_label_val = np.concatenate((y_label_val,targets))

                np.savetxt(outfile, batch_pred_val, fmt='%-7.2f')
                val_batches += 1
                pred.append(batch_pred_val)
            np.save(modelsavedir+str(epoch) +'.npy',pred)
        # outfile is closed automatically by the with-block
        progbar.update(30,values=[('val_batches',val_batches)])

        ## For softmax
        # train_acc = get_acc(y_label_train, pred_train[:,1], threshold)
        # val_acc = get_acc(y_label_val, pred_val[:,1], threshold)

        # For sigmoid
        train_acc = get_acc(y_label_train, pred_train, threshold)
        val_acc = get_acc(y_label_val, pred_val, threshold)

        # img_save_path = '../log/curve_epoch_' + str(epoch) + '.png'
        # plot_recall_curve(y_label_val,pred_val,img_save_path)

        # all_param_values = lasagne.layers.get_all_param_values(networkout)


        print('Accuracy       : {:.2f} %  {:.2f} %'.format(100*train_acc[0][0], 100*val_acc[0]))
        print('Precision      : {:.2f} %  {:.2f} %'.format(100*train_acc[0][3], 100*val_acc[3]))
        if 0.9 <= val_acc[1] < 0.98 and val_acc[2] > 0.25:
            print('----------------------------------------------------------------------------')
        print('Positive Recall: {:.2f} %  {:.2f} %'.format(100*train_acc[0][1], 100*val_acc[1]))
        if 0.9 <= val_acc[1] < 0.98 and val_acc[2] > 0.25:
            print('----------------------------------------------------------------------------')
        print('Negative Recall: {:.2f} %  {:.2f} %'.format(100*train_acc[0][2], 100*val_acc[2]))


        if len(list_sort) == 1 and 0.9 <= val_acc[1] < 0.98 and val_acc[2] >= 0.25:
            list_sort.insert(0, [val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
        saveflag = True
        i = 0
        while i < min(len(list_sort), 50) and len(list_sort) != 1 and 0.9 <= val_acc[1] < 0.98 and val_acc[2] >= 0.25:
            if val_acc[1] > list_sort[i][2] and abs(train_acc[0][1] - val_acc[1]) < 0.1 and train_acc[0][0] < 0.98:
                list_sort.insert(i, [val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
                break
            elif val_acc[1] == list_sort[i][2] and val_acc[2] > list_sort[i][3] and \
                abs(train_acc[0][1]-val_acc[1])< 0.1 and train_acc[0][0] < 0.98:
                list_sort.insert(i, [val_acc[0],val_acc[3],val_acc[1],val_acc[2],epoch])
                break
            else:
                i += 1
        if len(list_sort) == 1 or val_acc[1] < 0.9 or val_acc[1] >= 0.98 or val_acc[2] < 0.25:
            saveflag = False
            
        fout.write('Accuracy       : {:.2f} %\n'.format(100*train_acc[0][0]))
        fout.write('Precision      : {:.2f} %\n'.format(100*train_acc[0][3]))
        fout.write('Positive Recall: {:.2f} %\n'.format(100*train_acc[0][1]))
        fout.write('Negative Recall: {:.2f} %\n'.format(100*train_acc[0][2]))

        fout.write('Val Accuracy       : {:.2f} %\n'.format(100*val_acc[0]))
        fout.write('Val Precision      : {:.2f} %\n'.format(100*val_acc[3]))
        fout.write('Val Positive Recall: {:.2f} %\n'.format(100*val_acc[1]))
        fout.write('Val Negative Recall: {:.2f} %\n'.format(100*val_acc[2]))
        if saveflag:
            saveto = ('1w_3k_7k_SavedModels_Epoch_'+str(round(val_acc[1],2)*10)+'_'+str(round(val_acc[2],2)*10)+'_' + str(epoch) +
                     '_dro_' + str(dro) +'_lr_'+ str(lr) +'_ts_'+ str(threshold)+ '.params')
            write_model_param(networkout,modelsavedir,saveto)
        else:
            time.sleep(30)
            
    # After training, print the best epochs recorded in list_sort:
    for one in list_sort[:50]:
        print(one)
    fout.close()
    print ("Training Completed!")
    return 0
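`get_acc` is another helper these examples assume; from the indexing above it returns accuracy, positive recall, negative recall, and precision (in that order) for sigmoid outputs thresholded at `threshold`. A hedged NumPy sketch of the flat case used for `val_acc` (the extra `[0]` on `train_acc` presumably reflects the `(n, 1)` label reshape in the training loop):

import numpy as np

def get_acc(y_true, y_pred, threshold):
    # Threshold sigmoid outputs and compute the four metrics in the
    # order the examples above index them:
    # [0] accuracy, [1] positive recall, [2] negative recall, [3] precision.
    y_true = np.asarray(y_true).ravel().astype(bool)
    y_pred = np.asarray(y_pred).ravel() >= threshold
    accuracy = np.mean(y_true == y_pred)
    pos_recall = y_pred[y_true].mean() if y_true.any() else 0.0
    neg_recall = (~y_pred[~y_true]).mean() if (~y_true).any() else 0.0
    precision = y_true[y_pred].mean() if y_pred.any() else 0.0
    return accuracy, pos_recall, neg_recall, precision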
Example #9
def dl_progress(count, block_size, total_size):
    global progbar
    if progbar is None:
        progbar = Progbar(total_size)
    else:
        progbar.update(count * block_size)
Example #10
def dl_progress(count, block_size, total_size, progbar=None):
    # Note: with the default progbar=None a fresh bar would be created
    # on every call; the caller must bind the same Progbar instance into
    # the hook for the bar to advance (see the sketch below).
    if progbar is None:
        progbar = Progbar(total_size)
    else:
        progbar.update(count * block_size)
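Unlike Examples #4 and #9, this variant threads the bar through a keyword argument instead of a global, so the caller must create the `Progbar` once and bind it into the hook, e.g. with `functools.partial`. A usage sketch with placeholder values:

from functools import partial
from urllib.request import urlretrieve
from keras.utils import Progbar  # assumed source of Progbar

total_size = 10 * 1024 * 1024  # assumed known up front for this variant
bar = Progbar(total_size)
hook = partial(dl_progress, progbar=bar)  # reporthook supplies the three positional args
urlretrieve('https://example.com/weights.h5', 'weights.h5', reporthook=hook)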
Example #11
def mainmodel(model=modeltype, mini_batch=20, num_epochs=30, dro=0.7, lr=0.0001, preload=preload, saveto=saveto):
    print("model:%s minibatch:%d num_epochs:%d dropout:%f learningrate:%f\n" % (model, mini_batch, num_epochs, dro, lr))
   
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_data()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('input')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")

    ## add input_var when needed in transfer learning model
    network = build_model(input_var=input_var,dro=dro)
 
    if preload:
#        with open(preload, 'r') as f:
#            data = pickle.load(f)
#        print(data)
#        lasagne.layers.set_all_param_values(model, data)
        if model=='vgg':
            read_model_param(network['fc7'],modelloaddir, preload)
        else:
            read_model_param(network['pool5/7x7_s1'],modelloaddir, preload)
        print('pretrained model loaded')
    start_time = time.time()
    networkout=network['prob']
    #networkout=network
    prediction = lasagne.layers.get_output(networkout)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    params = lasagne.layers.get_all_params(networkout, trainable=True)
    updates = lasagne.updates.adagrad(
            loss, params, learning_rate=lr, epsilon=1e-06)

    test_prediction = lasagne.layers.get_output(networkout, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()

    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    train_fn = theano.function([input_var, target_var], [loss,acc], updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    print("Compile time: {:.1f}s".format(time.time() - start_time))
    # We iterate over epochs:
    train_accuracy=[]
    train_losses=[]
    val_accuracy=[]
    val_losses=[]
    time_id=str(int(time.time()))
    #outfilename='./benchmark/'+model+'_'+str(mini_batch)+'_'+str(num_epochs)+'_'+str(dro)+'_'+str(lr)+'_'+time_id+'.txt'
    outfilename='./benchmark/'+model+'_'+str(mini_batch)+'_'+str(num_epochs)+'_'+str(dro)+'_'+str(lr)+'.txt'
    print (outfilename)
    outfile=open(outfilename,'w')
    #outfile.write("model:%s mini_batch:%s num_epochs:%s dro:%s learningrate:%s\n" % (model,mini_batch,num_epochs,dro,lr))
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        print('Epoch',epoch)
        train_err = 0
        train_acc = 0
        train_batches = 0
        progbarcount = 0
        progbar=Progbar(30)
        for batch in iterate_minibatches(X_train, y_train, mini_batch, shuffle=True):
            inputs, targets = batch
            progbarcount = progbarcount + np.float(len(targets))/len(y_train)*25
            err, acc = train_fn(inputs, targets)
            train_err += err
            train_acc += acc
            train_batches += 1
            #print(train_batches)
            progbar.update(progbarcount,values=[('acc',round(train_acc/train_batches,3)),
                                                ('loss',round(train_err/train_batches,3))])
            
        train_accuracy.append(round(train_acc/train_batches,3))
        train_losses.append(round(train_err/train_batches,3))
        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, np.min([mini_batch,len(y_val)]), shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        #print(val_acc,val_err,val_batches)
        progbar.update(30,values=[('val_acc',round(val_acc/val_batches,3)),
                                            ('val_loss',round(val_err/val_batches,3))])
        val_accuracy.append(round(val_acc/val_batches,3))
        val_losses.append(round(val_err/val_batches,3))

    # After training, we compute and print the test error:
    print (val_losses)
    '''
    outfile.write('\n')
    outfile.write('train_acc: ')
    for it in train_accuracy:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('train_loss: ')
    for it in train_losses:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('val_acc: ')
    for it in val_accuracy:
        outfile.write('%s,' % it)
    outfile.write('\n')
    outfile.write('vall_loss: ')
    for it in val_losses:
        outfile.write('%s,' % it)
    outfile.write('\n')
    '''
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_val, y_val, np.min([mini_batch,len(y_val)]), shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("")
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))

    #outfile.write('\n')
    #outfile.write("test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))
    outfile.write("test accuracy:\t{:.2f}".format(test_acc / test_batches))
    outfile.close()
    write_model_param(networkout,modelsavedir,saveto)
    return (train_err/train_batches, train_acc/train_batches * 100,
            val_err/val_batches, val_acc/val_batches * 100)