Example #1
def main():
    args = get_args()
    # set gpu ids
    str_ids = args.gpu_ids.split(',')
    args.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            args.gpu_ids.append(id)

    if args.training:
        if args.model == "semisupervised_cycleGAN":
            print("Training semi-supervised cycleGAN")
            model = md.semisuper_cycleGAN(args)
            model.train(args)
        if args.model == "supervised_model":
            print("Training base model")
            model = md.supervised_model(args)
            model.train(args)
    if args.testing:
        print("Testing")
        test(args)
    if args.validation:
        print("Validating")
        validation(args)
Example #2
def merge_present_second(xs, ys):
    """Return the items that are present in the second list but not in the first.

    Assumes both xs and ys are sorted in ascending order (two-pointer merge).
    """
    result = []
    xi = 0
    yi = 0

    while True:
        if xi >= len(xs):
            # 1st list has finished; all remaining items of the 2nd list exist only in the 2nd list.
            result.extend(ys[yi:])
            return result

        if yi >= len(ys):
            # 2nd list has finished; there are no more items to add to the result.
            return result

        if xs[xi] > ys[yi]:
            # The smallest remaining item of the 2nd list is smaller than that of
            # the 1st list, so it exists only in the 2nd list.
            result.append(ys[yi])
            yi += 1
        elif xs[xi] == ys[yi]:
            # Both items are equal, so the value appears in both lists; keep going.
            xi += 1
            yi += 1
        else:  # xs[xi] < ys[yi]
            xi += 1


test(merge_present_second([1, 2, 3, 5, 6], [1, 2, 7, 8, 9]) == [7, 8, 9])
Example #3
def execute():
    """
    Experiment: difference between pretraining on ImageNet and no pretraining
    :return:
    """
    train_idg = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)

    val_idg = ImageDataGenerator(width_shift_range=0.25, height_shift_range=0.25, horizontal_flip=True)

    train_gen_boneage, val_gen_boneage, steps_per_epoch_boneage, validation_steps_boneage = get_gen(train_idg, val_idg,
                                                                                                    IMG_SIZE,
                                                                                                    BATCH_SIZE_TRAIN,
                                                                                                    BATCH_SIZE_VAL,
                                                                                                    'boneage',
                                                                                                    disease_enabled=False)

    model = get_model(model='winner', gender_input_enabled=False, age_output_enabled=True,
                      disease_enabled=False,
                      pretrained=PRETRAINED)

    OPTIMIZER = Adam(lr=1e-3)

    history = train(train_gen_boneage, val_gen_boneage, steps_per_epoch_boneage, validation_steps_boneage, model,
                    OPTIMIZER, LOSS, LEARNING_RATE, NUM_EPOCHS, finetuning=False,
                    num_trainable_layers=NUM_TRAINABLE_LAYERS)

    print('Boneage dataset (final) history:', history.history)

    test(model)

    backend.clear_session()
Example #4
def main():
  args = get_args()
  # set gpu ids
  str_ids = args.gpu_ids.split(',')
  args.gpu_ids = []
  for str_id in str_ids:
    id = int(str_id)
    if id >= 0:
      args.gpu_ids.append(id)

  ### For setting the image dimensions for different datasets
  if args.crop_height is None and args.crop_width is None:
    if args.dataset == 'voc2012':
      args.crop_height = args.crop_width = 321
    elif args.dataset == 'acdc':
      args.crop_height = args.crop_width = 256
    elif args.dataset == 'cityscapes':
      args.crop_height = 512
      args.crop_width = 1024

  if args.training:
    if args.model == "semisupervised_cycleGAN":
      print("Training semi-supervised cycleGAN")
      model = md.semisuper_cycleGAN(args)
      model.train(args)
    if args.model == "supervised_model":
      print("Training base model")
      model = md.supervised_model(args)
      model.train(args)
  if args.testing:
      print("Testing")
      test(args)
  if args.validation:
      print("Validating")
      validation(args)
Example #5
def test_suite():
    print("Beginning Tests:\n---")
    t.test(r_sum([1, 2, [11, 13], 8]) == 35)

    t.test(r_max([2, 9, [1, 13], 8, 6]) == 13)
    t.test(r_max([2, [[100, 7], 90], [1, 13], 8, 6]) == 100)
    t.test(r_max([[[13, 7], 90], 2, [1, 100], 8, 6]) == 100)
    t.test(r_max(["joe", ["sam", "ben"]]) == "sam")
Example #6
def test_suite():
    testing.test(my_sum([1, 2, 3, 4]) == 10)
    testing.test(my_sum([1.25, 2.5, 1.75]) == 5.5)
    testing.test(my_sum([1, -2, 3]) == 2)
    testing.test(my_sum([ ]) == 0)
    testing.test(my_sum(range(11)) == 55)  # 11 is not included in the list.

    for i in range(1, 5):
        collatz(i)
Example #7
def cross_validation():
    lines = []
    tot_accuracy = 0
    fold_size = size_of_file() // 10

    with open('stage2_result.txt') as result_file:
        for line in result_file:
            lines.append(line.rstrip('\n'))

    for i in range(9, -1, -1):
        test_set = lines[i * fold_size:][:fold_size]
        training_set = lines[:i * fold_size] + lines[(i + 1) * fold_size:]

        with open('training_set.txt', 'w') as f:
            for s in training_set:
                f.write(s + '\n')

        multinomial_model.training_data('training_set.txt')
        correct, tp, tn, fp, fn = testing.test(training_set, test_set)
        accuracy = float(tp + tn) / float(tp + tn + fp + fn)
        print(accuracy * 100)
        tot_accuracy += accuracy
    print((tot_accuracy / 10) * 100)
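# A minimal standalone sketch of the same 10-fold split logic used above, on toy
# data (the names here are illustrative, not from the project):
data = list(range(20))        # stands in for the lines read from stage2_result.txt
fold_size = len(data) // 10

for i in range(9, -1, -1):
    test_set = data[i * fold_size:][:fold_size]
    training_set = data[:i * fold_size] + data[(i + 1) * fold_size:]
    assert len(test_set) == fold_size
    assert sorted(test_set + training_set) == data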
Example #8
def optimize_sp_for_fixed_beta(params: pr.Parameters, groups, real_beta):
    # print("!!! Beginning experiment !!!")
    (x, y) = tst.generate_training_data(real_beta, params)

    def f(log_sparsity_param):
        params.sparsity_param = math.pow(2, log_sparsity_param)
        (learned_beta, runtime, cycles,
         convergence_type) = pr.learn(x, y, groups, params)
        avg_error = tst.test(learned_beta, real_beta, params)
        return avg_error

    #Optimize sparsity parameter several times (stochastic optimization)
    opt_log_params = []
    for i in range(5):
        best_log_param = minimize(f, 0.0, 12.0, 0.3)
        opt_log_params.append(best_log_param)
        print("optimizing sparsity param...", math.pow(2, best_log_param))
    # print("sparsity params for fixed beta:", opt_log_params)
    opt_sparsity_param = math.pow(2, np.mean(opt_log_params))

    # print("TRAINING SPARSITY:", params.training_feature_sparsity)
    print("Average optimum sparsity parameter: ", opt_sparsity_param, "= 2^",
          np.mean(opt_log_params))

    # Re-run experiment with optimal sparsity parameter
    params.sparsity_param = opt_sparsity_param
    (learned_beta, runtime, cycles,
     convergence_type) = pr.learn(x, y, groups, params)
    avg_error = tst.test(learned_beta, real_beta, params)
    print("Performance on beta with optimum sparsity parameter",
          params.sparsity_param, "runtime:", int(runtime), "cycles:", cycles,
          "avg error:", round(avg_error, 3), "convergence:", convergence_type)

    return runtime, cycles, avg_error, convergence_type, opt_sparsity_param
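# minimize() above is a project-local helper whose exact signature is not shown
# here; as a rough standalone illustration of searching a sparsity parameter in
# log2 space, a plain grid search over an assumed error function could look like
# this (all names below are hypothetical):
import math

def grid_search_log2(f, lo=0.0, hi=12.0, step=0.3):
    """Evaluate f at log2-spaced points and return the best (lowest-error) log2 value."""
    best_log, best_val = lo, f(lo)
    x = lo + step
    while x <= hi:
        val = f(x)
        if val < best_val:
            best_log, best_val = x, val
        x += step
    return best_log

def toy_error(log_sp):
    return (log_sp - 4.0) ** 2    # toy error curve with a minimum near 2**4

best = grid_search_log2(toy_error)
print("best sparsity param ~", math.pow(2, best))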
Example #9
def fit_model(net, epochs, device, trainloader, testloader, classes, loss_fun,
              optimizer, scheduler):

    EPOCHS = epochs
    # Starting Model Training
    for epoch in range(EPOCHS):
        print("EPOCH:", epoch + 1)
        train_result = training.train(net, device, trainloader, optimizer,
                                      epoch, loss_fun)

        net = train_result['model']

        val_result = testing.test(net, device, testloader, classes, loss_fun)
        scheduler.step(val_result['val_loss'][-1] /
                       100)  # should be called after Validation

    # Training Finished
    result = {
        'model': net,
        'val_acc': val_result['val_acc'],
        'val_loss': val_result['val_loss'],
        'train_acc': train_result['train_acc'],
        'train_loss': train_result['train_loss']
    }
    print('Finished Training')
    return result
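# The scheduler above is stepped with a validation metric, which matches PyTorch's
# ReduceLROnPlateau API; the scheduler itself is constructed outside this example,
# so that is an assumption. A minimal self-contained sketch of the pattern:
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=2)

for epoch in range(5):
    # ... training and validation would run here ...
    val_loss = 1.0 / (epoch + 1)          # placeholder validation loss
    scheduler.step(val_loss)              # called after validation, as above
    print(epoch, optimizer.param_groups[0]['lr'])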
Example #10
def fit_model(net, epochs, device, trainloader, testloader, classes):
    # Loss Function & Optimizer and Step learning rate
    loss_fun = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=5e-4)
    scheduler = StepLR(optimizer, step_size=6, gamma=0.1)

    EPOCHS = epochs
    # Starting Model Training
    for epoch in range(EPOCHS):
        print("EPOCH:", epoch + 1)
        train_result = training.train(net, device, trainloader, optimizer,
                                      epoch, loss_fun)
        net = train_result['model']
        scheduler.step()

        val_result = testing.test(net, device, testloader, classes, loss_fun)

    # Training Finished
    result = {
        'model': net,
        'val_acc': val_result['val_acc'],
        'val_loss': val_result['val_loss'],
        'train_acc': train_result['train_acc'],
        'train_loss': train_result['train_loss']
    }
    print('Finished Training')
    return result
Example #11
def main():
    args = parse_args()

    if args.subparser_name == 'mixed_speech_generator':
        create_mixed_tracks_data(args.data_dir, args.base_speaker_ids,
                                 args.noisy_speaker_ids, args.audio_dir,
                                 args.dest_dir, args.num_samples, args.num_mix,
                                 args.num_mix_speakers)
    elif args.subparser_name == 'audio_preprocessing':
        save_spectrograms(args.data_dir, args.speaker_ids, args.audio_dir,
                          args.dest_dir, args.sample_rate, args.max_wav_length)
    elif args.subparser_name == 'video_preprocessing':
        save_face_landmarks(args.data_dir, args.speaker_ids, args.video_dir,
                            args.dest_dir, args.shape_predictor, args.ext)
    elif args.subparser_name == 'show_face_landmarks':
        show_face_landmarks(args.video,
                            fps=args.fps,
                            predictor_params=args.shape_predictor,
                            full_draw=args.full)
    elif args.subparser_name == 'tbm_computation':
        save_target_binary_masks(args.data_dir, args.speaker_ids,
                                 args.audio_dir, args.dest_dir,
                                 args.mask_factor, args.sample_rate,
                                 args.max_wav_length, args.num_ltass)
    elif args.subparser_name == 'tfrecords_generator':
        create_dataset(args.data_dir, args.num_speakers, args.video_dir,
                       args.tbm_dir, args.base_audio_dir, args.mix_audio_dir,
                       args.norm_data_dir, args.dest_dir, args.mode,
                       args.delta)
    elif args.subparser_name == 'training':
        config = Configuration(args.learning_rate, args.updating_step,
                               args.learning_decay, args.dropout,
                               args.batch_size, args.opt, args.video_dim,
                               args.audio_dim, args.num_audio_samples,
                               args.epochs, args.hidden_units, args.layers,
                               args.regularization, args.mask_threshold)
        train(args.model, args.data_dir, args.train_set, args.val_set, config,
              args.exp, args.mode)
    elif args.subparser_name == 'testing':
        test(args.data_dir, args.test_set, args.exp, args.ckp, args.video_dim,
             args.audio_dim, args.mode, args.num_audio_samples,
             args.mask_threshold, args.mix_eval, args.output_dir,
             args.mask_dir)
    else:
        print("Bad subcommand name.")
        sys.exit(1)
Example #12
def run(
    name,
    root,
    binary,
    epochs,
    batch,
    optim,
    learningrate,
    patience,
    tensorboard,
    weights,
    gradients,
    save,
    test,
    filename,
):
    """
    SST Details:\n
    -----------\n
    root: only root sentences\n
    all: sentences parsed into phrases\n
    binary: only rows with sentiment negative, positive\n
    fine: negative, partially negative, neutral, partially positive, positive\n

    SST Models: rnn, lstm, gru, bilstm, conv1d

    """
    if not test:
        train(
            name,
            root,
            binary,
            epochs,
            batch,
            optim,
            learningrate,
            patience,
            tensorboard,
            weights,
            gradients,
            save,
        )
    else:
        from testing import test

        test(root, binary, filename)
Example #13
def train_model(args, device):
    dataset = Dataset2d_3d()
    train_loader = DataLoader(dataset,
                              pin_memory=True,
                              shuffle=True,
                              batch_size=64)
    model = CNN_Autoencoder()
    print(model)
    #criterion = torch.nn.MSELoss()
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters())
    train(model, train_loader, optimizer, criterion, device, args.epochs)
    test_dataset = Dataset2d_3d(istrain=False)
    testloader = DataLoader(test_dataset,
                            pin_memory=True,
                            shuffle=True,
                            batch_size=64)
    test(model, testloader, criterion, device)
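# The commented-out MSELoss next to the L1Loss above suggests the criterion was
# swapped at some point; a tiny self-contained comparison of the two losses on
# toy tensors (illustrative only, assuming PyTorch):
import torch
from torch import nn

pred = torch.tensor([0.0, 2.0, 4.0])
target = torch.tensor([0.0, 1.0, 1.0])

print(nn.L1Loss()(pred, target))   # mean |pred - target|   -> 1.3333
print(nn.MSELoss()(pred, target))  # mean (pred - target)^2 -> 3.3333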
Example #14
def run_experiment(params, generate_beta):
    groups = tst.generate_groups(params)
    real_beta = generate_beta(params, groups)
    (x, y) = tst.generate_training_data(real_beta,params)

    (learned_beta, runtime, cycles, convergence_type) = pr.learn(x, y, groups, params)

    avg_error = tst.test(learned_beta, real_beta, params)
    return runtime, cycles, avg_error, convergence_type
Example #15
def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('--mode', '-m', choices=["train", "test"], required=True,
                        help="There are two modes: test mode classifies a sample image using a trained model, and train mode trains your model")
    parser.add_argument('--models',
                        type=int,
                        choices=[0, 1, 2, 3, 4, 5, 6],
                        default=0,
                        help="The model you will train or use")
    parser.add_argument('--model_path', type=str, default="data\\models")
    parser.add_argument('--img_size', type=int, default=112)

    # For training
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--learning_rate', '-lr', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.9,
                        help="Momentum hyperparameter used by the optimizer")
    parser.add_argument('--img_path', type=str, default="data\\images")
    parser.add_argument('--annotation_path',
                        type=str,
                        default='data\\annotations')

    # For continuous training
    parser.add_argument('--model_load_flag', action='store_true',
                        help="Set this flag to continue training an existing model; when set, you must give the name of the model to load")
    parser.add_argument('--pre_trained_model_name', required=False, default="Vgg16_20_checkpoint.pth",
                        help="Required when 'model_load_flag' is set: the pre-trained model to load so training can continue")

    # For testing
    parser.add_argument('--test_model_name',
                        type=str,
                        default='Vgg16_20_checkpoint.pth')
    parser.add_argument('--test_image_path', type=str, default="data\\samples",
                        help="If you want to classify cat and dog images with the pre-trained model, put the images in this directory")
    parser.add_argument('--test_image_name', type=str, default='sample_0.jpg',
                        help="The name of the image file you want to classify")

    args = parser.parse_args()
    if args.mode == "train":
        train(args)
    else:
        test(args)
Example #16
def test_suite():
    # Setup, no tests required
    my_inbox = SMS_store()
    my_inbox.add_new_arrival(12345678, 124512, "Netflix and chill?")
    my_inbox.add_new_arrival(98765432, 123456, "Ey gril wan som fucc?")
    my_inbox.add_new_arrival(13453453, 234643, "Bub u wanna get kebab?")

    # Start tests
    t.test(my_inbox.message_count() == 3)

    t.test(my_inbox.get_unread_indexes() == [0, 1, 2])

    t.test(my_inbox.get_message(1) == (98765432, 123456, "Ey gril wan som fucc?"))
    t.test(my_inbox.get_unread_indexes() == [0, 2])

    my_inbox.delete(1)
    my_inbox.clear()

    t.test(my_inbox.message_count() == 0)
Example #17
def mains(df, train_file, depth, test_file=None, ensemble=None):
    """
    This function helps to build the 
    decision tree and evaluates it.
    Arguments:
    1. train_file: The input train filename as string.
    2. depth: The depth of the decision tree.
    3. test_file: The test file as a string, but defaulted
    to None (Cross validation performed)
    4. weights: The weights with which the entropy is calculated, defaulted 
    to None.
    """

    dt_dict = tree.main(df, train_file, depth=depth, ensemble=ensemble)
    # function call for decision tree construction
    df_train = testing.test(train_file, dt_dict)
    # function call to evaluate the decision using train data
    df_test = testing.test(test_file, dt_dict)
    # func call to test on test data
    return df_train, df_test
Example #18
    def __init__(self):
        super(QWidget, self).__init__()
        self.tform = QWidget()
        self.uit = test()
        self.tform.setWindowFlag(Qt.WindowStaysOnTopHint)
        self.tform.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.tform.setWindowFlag(Qt.FramelessWindowHint)
        self.uit.setupUi(self.tform)
        self.tform.setWindowFlag(Qt.WindowStaysOnTopHint)
        self.tform.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        self.tform.show()
        self.uit.close.clicked.connect(self.closepopup)
Example #19
def test(seed, layer_norm, full, action_repeat, fail_reward, exclude_centering_frame,
         integrator_accuracy, render, **kwargs):
    # Configure things.
    rank = MPI.COMM_WORLD.Get_rank()
    if rank != 0:
        logger.set_level(logger.DISABLED)
    # Main env
    env = create_environment(render, full, action_repeat,
                             fail_reward, exclude_centering_frame,
                             integrator_accuracy)
    env.reset()
    eval_env = None

    # Parse noise_type
    nb_actions = env.action_space.shape[-1]

    # Configure components.
    memory = ReplayBufferFlip(int(5e6),
                              False,
                              env.get_observation_names(),
                              env.action_space.shape,
                              env.observation_space.shape)
    actor = Actor(nb_actions, layer_norm=layer_norm)
    critic = Critic(layer_norm=layer_norm)

    # Seed everything to make things reproducible.
    seed = seed + 1000000 * rank
    logger.info('rank {}: seed={}, logdir={}'.format(
        rank, seed, logger.get_dir()))
    tf.reset_default_graph()
    set_global_seeds(seed)
    env.seed(seed)

    # Disable logging for rank != 0 to avoid noise.
    if rank == 0:
        start_time = time.time()

    del kwargs['func']
    testing.test(env=env, actor=actor, critic=critic, memory=memory, **kwargs)
    env.close()
Example #20
def groups_loss():

    PATHS = [
        './mnist_net1.pth', './mnist_net2.pth', './mnist_net3.pth',
        './mnist_net4.pth', './mnist_net5.pth'
    ]

    min_loss = sys.maxsize * 2 + 1

    min_loss_t = sys.maxsize * 2 + 1

    min_groups = groups_generator.generate_groups()

    min_groups_t = groups_generator.generate_groups()

    max_accuracy = 0

    for i in range(5):

        groups = groups_generator.generate_groups()

        (train_d_set, test_d_set) = divide_dataset.divide_dataset(groups)

        loss = training.train(train_d_set, PATHS[i])

        print(f'Groups: {groups}')

        print(f'Loss: {loss}')

        if loss < min_loss:
            min_loss = loss
            min_groups = groups
            PATH = PATHS[i]

        (loss_t, accuracy) = testing.test(test_d_set, PATHS[i])

        if loss_t < min_loss_t:
            min_loss_t = loss_t
            min_groups_t = groups
            PATH_T = PATHS[i]
            min_accuracy = accuracy

        if accuracy > max_accuracy:
            max_accuracy = accuracy

    print(f'\nThe minimum loss: {min_loss}')
    print(f'\nThe minimum testing loss: {min_loss_t}')
    print(f'\nThe accuracy at that minimum testing loss: {min_accuracy}')
    print(f'\nThe max accuracy is: {max_accuracy}')

    return PATH, min_groups, PATH_T, min_groups_t
Example #21
def model_eval(dataFile, nclust, maxiter, epsilon):
    #reading data: X- data, y- class att.
    X, y = readingData.dataPrep(dataFile)
    #training
    start_time = time.time()
    mu, sigma, prior = EM.em_clustering(X, nclust, maxiter, epsilon)
    averageTraningTime = time.time() - start_time
    #testing
    W = EM.e_step(X, mu, sigma, prior, nclust)
    accuracy = testing.test(y, W, X)
    averageTraningTime = round(averageTraningTime, 3)
    accuracy = int(round(accuracy * 100))
    print(" Traning running time :%s seconds " % averageTraningTime)
    print("accuracy:%s%%" % accuracy)
Example #22
def experiment_with_fixed_params(params: pr.Parameters, gen_beta):
    # print("!!! Beginning experiment !!!")
    groups = tst.generate_groups(params)
    real_beta = gen_beta(params, groups)
    (x, y) = tst.generate_training_data(real_beta, params)

    # Re-run experiment with optimal sparsity parameter
    (learned_beta, runtime, cycles,
     convergence_type) = pr.learn(x, y, groups, params)
    avg_error = tst.test(learned_beta, real_beta, params)
    print("Number of Groups:", params.num_groups,
          "runtime:", int(runtime), "cycles:", cycles, "avg error:",
          round(avg_error, 3), "convergence:", convergence_type)

    return runtime, cycles, avg_error, convergence_type
Example #23
def run_experiment(params: pr.Parameters, gen_beta):

    #Generate learning algorithm
    groups = tst.generate_groups(params)
    real_beta = gen_beta(params, groups)
    (x, y) = tst.generate_training_data(real_beta, params)

    #Learning algorithm
    (learned_beta, runtime, cycles,
     convergence_type) = pr.learn(x, y, groups, params)

    #Test accuracy
    avg_error = tst.test(learned_beta, real_beta, params)
    print("runtime:", int(runtime), "cycles:", cycles, "avg error:",
          round(avg_error, 3), "convergence:", convergence_type)

    return runtime, cycles, avg_error, convergence_type
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("data",
                        help="File of measure class pairs to test.",
                        type=str)
    parser.add_argument("dimen",
                        help="Tuple representing measure space.",
                        type=str)
    parser.add_argument(
        "priors",
        help="File designating prior probabilities of classes.",
        type=str)
    parser.add_argument(
        "conditionals",
        help="File designating class conditional probabilities.",
        type=str)
    parser.add_argument(
        "--eGain",
        "-e",
        help=
        "Economic gain matrix for data. If not provided assumes identity matrix.",
        type=str)
    parser.add_argument(
        "--vFolds",
        "-v",
        help=
        "Number of v-fold partitions for testing. If not provided, assumes all data is for testing.",
        type=int)
    args = parser.parse_args()

    # Reading data
    dimen = eval(args.dimen)
    measures, tags = reader.readData(args.data, dimen)
    priors = reader.readPriors(args.priors)
    conds = reader.readCCP(args.conditionals)
    e = False
    if args.eGain:
        e = reader.readGain(args.eGain)

    classifier = BayesClassifier(priors, conds, eGain=e)

    expGain = test(classifier, measures, tags, V=args.vFolds)

    print("The expected gain for the data is: {}".format(expGain))
Example #25
# Exercises from chapter 11 - Lists.

from testing import test

#Exercise 5
"""Lists can be used to represent mathematical vectors. In this exercise and several that follow you will write functions to perform standard operations on vectors. Create a script named vectors.py and write Python code to pass the tests in each case."""


def add_vectors(u, v):
    new_vector = []
    for i in range(len(u)):
        new_vector.append(u[i] + v[i])
    return new_vector


test(add_vectors([1, 1], [1, 1]) == [2, 2])
test(add_vectors([1, 2], [1, 4]) == [2, 6])
test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])

#Exercise 6
"""Write a function scalar_mult(s, v) that takes a number, s, and a list, v, and returns the scalar multiple of v by s."""


def scalar_mult(s, v):
    sm_list = []
    for item in v:
        sm_list.append(item * s)
    return sm_list


test(scalar_mult(5, [1, 2]) == [5, 10])
Example #26
        if minibatch_count % N == 0 and minibatch_count != 0:

            # Print the loss averaged over the last N mini-batches
            N_minibatch_loss /= N
            print('Epoch %d, average minibatch %d loss: %.3f' %
                  (epoch + 1, minibatch_count, N_minibatch_loss))

            # Add the averaged loss over N minibatches and reset the counter
            avg_minibatch_loss.append(N_minibatch_loss)
            N_minibatch_loss = 0.0

        # validate every 2 N minibatches.
        if minibatch_count % (2 * N) == 0 and minibatch_count != 0:

            # validation
            total_val_loss, avg_val_loss, accuracy, precision, recall, balance = testing.test(
                model, computing_device, val_loader, criterion)
            if total_val_loss < current_best_val:
                current_best_val = total_val_loss
                best_params = model.state_dict()
                increasing_epochs = 0
            else:
                increasing_epochs += 1
            if increasing_epochs > early_stop_epochs:
                break

            print(total_val_loss, avg_val_loss, accuracy, precision, recall,
                  balance)

    print("Finished", epoch + 1, "epochs of training")

print("Training complete after", epoch, "epochs, with total loss: ",
Example #27
def test_suite():
    testing.test(cal_hypo((3, 4)) == 5)
Example #28
def test_suite():
    print("Beginning tests\n------------")
    testing.test(add_vectors([1, 1], [1, 1]) == [2, 2])
    testing.test(add_vectors([1, 2], [1, 4]) == [2, 6])
    testing.test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])

    testing.test(scalar_mult(5, [1, 2]) == [5, 10])
    testing.test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])
    testing.test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])

    testing.test(dot_product([1, 1], [1, 1]) ==  2)
    testing.test(dot_product([1, 2], [1, 4]) ==  9)
    testing.test(dot_product([1, 2, 1], [1, 4, 3]) == 12)

    testing.test(replace("Mississippi", "i", "I") == "MIssIssIppI")
    s = "I love spom! Spom is my favorite food. Spom, spom, yum!"
    testing.test(replace(s, "om", "am") ==
    "I love spam! Spam is my favorite food. Spam, spam, yum!")
    testing.test(replace(s, "o", "a") ==
    "I lave spam! Spam is my favarite faad. Spam, spam, yum!")
Example #29
def execute():
    """
    Experiment with different numbers of frozen layers: values between 10 and 100, in steps of 10, were tried
    :return:
    """
    train_idg = ImageDataGenerator(rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

    val_idg = ImageDataGenerator(width_shift_range=0.25,
                                 height_shift_range=0.25,
                                 horizontal_flip=True)

    train_gen_chest, val_gen_chest, steps_per_epoch_chest, validation_steps_chest = get_gen(
        train_idg,
        val_idg,
        IMG_SIZE,
        BATCH_SIZE_TRAIN,
        BATCH_SIZE_VAL,
        'chest_boneage_range',
        disease_enabled=False)
    train_gen_boneage, val_gen_boneage, steps_per_epoch_boneage, validation_steps_boneage = get_gen(
        train_idg,
        val_idg,
        IMG_SIZE,
        BATCH_SIZE_TRAIN,
        BATCH_SIZE_VAL,
        'boneage',
        disease_enabled=False)

    model = get_model(model='winner',
                      gender_input_enabled=True,
                      age_output_enabled=True,
                      disease_enabled=False,
                      pretrained='imagenet')

    OPTIMIZER = Adam(lr=1e-3)

    history = train(train_gen_chest,
                    val_gen_chest,
                    steps_per_epoch_chest,
                    validation_steps_chest,
                    model,
                    OPTIMIZER,
                    LOSS,
                    LEARNING_RATE,
                    NUM_EPOCHS,
                    finetuning=False,
                    num_trainable_layers=NUM_TRAINABLE_LAYERS)

    print('Chest dataset (final) history:', history.history,
          'NUM_TRAINABLE_LAYERS:', NUM_TRAINABLE_LAYERS)

    OPTIMIZER = hp.OPTIMIZER_FINETUNING  # works better for finetuning

    history = train(train_gen_boneage,
                    val_gen_boneage,
                    steps_per_epoch_boneage,
                    validation_steps_boneage,
                    model,
                    OPTIMIZER,
                    LOSS,
                    LEARNING_RATE,
                    NUM_EPOCHS,
                    finetuning=True,
                    num_trainable_layers=NUM_TRAINABLE_LAYERS)

    print('Boneage dataset (final) history:', history.history,
          'NUM_TRAINABLE_LAYERS:', NUM_TRAINABLE_LAYERS)

    test(model)

    backend.clear_session()
Example #30
def test_suite():
    r = Rectangle(point.Point(0, 0), 10, 5)
    t.test(r.contains(point.Point(0, 0)))
    t.test(r.contains(point.Point(3, 3)))
    t.test(not r.contains(point.Point(3, 7)))
    t.test(not r.contains(point.Point(3, 5)))
    t.test(r.contains(point.Point(3, 4.99999)))
    t.test(not r.contains(point.Point(-3, -3)))

    """
#!/usr/bin/env python3

from testing import test


def flatten(indata):
    flt_list = []
    new_list = []

    for elem in indata:
        if type(elem) == type([]):
            flt_list.extend(flatten(elem))
        else:
            flt_list.append(elem)

    return flt_list


test(flatten([2, 9, [2, 1, 13, 2], 8, [2, 6]]) == [2, 9, 2, 1, 13, 2, 8, 2, 6])
test(flatten([[9, [7, 1, 13, 2], 8], [7, 6]]) == [9, 7, 1, 13, 2, 8, 7, 6])
test(flatten([[9, [7, 1, 13, 2], 8], [2, 6]]) == [9, 7, 1, 13, 2, 8, 2, 6])
test(
    flatten([["this", ["a", ["thing"], "a"], "is"], ["a", "easy"]]) ==
    ["this", "a", "thing", "a", "is", "a", "easy"])
test(flatten([]) == [])
Example #32
def test_suite():
    testing.test(vowel_in_string("Compscience") == "Cmpscnc")
    testing.test(vowel_in_string("ABC") == "BC")
    testing.test(vowel_in_string("") == "")
Example #33
def testing_suite():
    print("Beginning Tests\n--------")
    point1 = Point(1, 2)
    point2 = Point(4, 6)

    t.test(point1.distance(point2) == 5.0)
Example #34
def random_forest(path_train,
                  path_test,
                  name_identifiers,
                  name_targets,
                  features,
                  delimiter,
                  num_cores=1):
    '''
    this method performs the training, testing and evaluation of the
    random forest algorithm.
    
    @type  path_train: str
    @param path_train: full path to csv (first line contains headers).
    delimiter should be specified with param delimiter
    
    @type  path_test: str
    @param path_test: full path to csv (first line contains headers).
    delimiter should be specified with param delimiter

    @type  name_identifiers: str
    @param name_identifiers: name of column containing identifiers
    
    @type  name_targets: str
    @param name_targets: name of column containing targets
    
    @type  features: str 
    @param features: list of features to be used
    
    @type  delimiter: str
    @param delimiter: delimiter used in csv. tested with "," and "\t"
    
    @type  num_cores: int
    @param num_cores: [optional]: num of cores you want to use in training
    
    @rtype: tuple
    @return: (output_classifier,evaluation). both are dicts. output_classifier
    maps identifier -> output_classifier. evaluation maps all kinds of evaluation metrics
    to floats.
    '''
    #call train using class
    identifiers_training,target_training,rf = train(path_train,
                                                  name_identifiers,
                                                  name_targets,
                                                  features,
                                                  output_path_model=None,
                                                  cores=num_cores,
                                                  the_delimiter=delimiter)
    
    #call test using class
    identifiers_test,target,prediction    = test(path_test,
                                                 name_identifiers,
                                                 name_targets,
                                                 features,
                                                 loaded_rf_model=rf,
                                                 path_rf_model=None,
                                                 the_delimiter=delimiter)
    
    #evaluate
    classifier_output,evaluation = evaluate(target, prediction, identifiers_test)
    
    return classifier_output,evaluation
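# train(), test() and evaluate() above are project-local helpers; a minimal
# self-contained sketch of the same train/predict/evaluate flow using
# scikit-learn's RandomForestClassifier (synthetic data stands in for the CSV
# inputs, which is an assumption):
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

rf = RandomForestClassifier(n_estimators=100, n_jobs=1, random_state=0)
rf.fit(X_train, y_train)                                   # train
prediction = rf.predict(X_test)                            # test
print("accuracy:", accuracy_score(y_test, prediction))     # evaluate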
Example #35
def test_suite():
    testing.test(cleanword("what?") == "what")
    testing.test(cleanword("'now!'") == "now")
    testing.test(cleanword("?+='w-o-r-d!,@$()'") ==  "word")

    testing.test(has_dashdash("distance--but"))
    testing.test(not has_dashdash("several"))
    testing.test(has_dashdash("spoke--"))
    testing.test(has_dashdash("distance--but"))
    testing.test(not has_dashdash("-yo-yo-"))

    testing.test(extract_words("Now is the time!  'Now', is the time? Yes, now.") ==
          ['now','is','the','time','now','is','the','time','yes','now'])
    testing.test(extract_words("she tried to curtsey as she spoke--fancy") ==
          ['she','tried','to','curtsey','as','she','spoke','fancy'])

    testing.test(wordcount("now", ["now","is","time","is","now","is","is"]) == 2)
    testing.test(wordcount("is", ["now","is","time","is","now","the","is"]) == 3)
    testing.test(wordcount("time", ["now","is","time","is","now","is","is"]) == 1)
    testing.test(wordcount("frog", ["now","is","time","is","now","is","is"]) == 0)


    testing.test(wordset(["now", "is", "time", "is", "now", "is", "is"]) ==
          ["now", "is", "time"])
    testing.test(wordset(["I", "a", "a", "is", "a", "is", "I", "am"]) ==
          ["I", "a", "is", "am"])
    #testing.test(wordset(["or", "a", "am", "is", "are", "be", "but", "am"]) ==
    #      ["a", "am", "are", "be", "but", "is", "or"])


    testing.test(longestword(["a", "apple", "pear", "grape"]) == 5)
    testing.test(longestword(["a", "am", "I", "be"]) == 2)
    testing.test(longestword(["this","supercalifragilisticexpialidocious"]) == 34)
    testing.test(longestword([ ]) == 0)
Example #36
def test_suite():
    testing.test(remove_punc("I never, ever, said that to you!?!")
                          == "I never ever said that to you")

    #quack()

    testing.test(char_count("Apple", "A") == 1)
    testing.test(char_count("", "a") == 0)

    poem = """
    We real cool. We
    Left school. We

    Lurk late. We
    Strike straight. We

    Sing sin. We
    Thin gin. We

    Jazz June. We
    Die soon."""

    sum_text(poem, "e")

    draw_multi_table()

    testing.test(reverse_str("happy") == "yppah")
    testing.test(reverse_str("Python") == "nohtyP")
    testing.test(reverse_str("") == "")
    testing.test(reverse_str("a") == "a")

    testing.test(mirror_str("good") == "gooddoog")
    testing.test(mirror_str("Python") == "PythonnohtyP")
    testing.test(mirror_str("") == "")
    testing.test(mirror_str("a") == "aa")


    testing.test(remove_letter("a", "apple") == "pple")
    testing.test(remove_letter("a", "banana") == "bnn")
    testing.test(remove_letter("z", "banana") == "banana")
    testing.test(remove_letter("i", "Mississippi") == "Msssspp")
    testing.test(remove_letter("b", "") == "")
    testing.test(remove_letter("b", "c") == "c")

    testing.test(is_palindrome("abba") == True)
    testing.test(not is_palindrome("abab") == True)
    testing.test(is_palindrome("tenet") == True)
    testing.test(not is_palindrome("banana") == True)
    testing.test(is_palindrome("straw warts") == True)
    testing.test(is_palindrome("a") == True)
    # testing.test(is_palindrome(""))    # Is an empty string a palindrome?

    testing.test(sub_string_count("is", "Mississippi") == 2)
    testing.test(sub_string_count("an", "banana") == 2)
    testing.test(sub_string_count("ana", "banana") == 2)
    testing.test(sub_string_count("nana", "banana") == 1)
    testing.test(sub_string_count("nanan", "banana") == 0)
    testing.test(sub_string_count("aaa", "aaaaaa") == 4)

    testing.test(del_sub_str("an", "banana") == "bana")
    testing.test(del_sub_str("cyc", "bicycle") == "bile")
    testing.test(del_sub_str("iss", "Mississippi") == "Missippi")
    testing.test(del_sub_str("eggs", "bicycle") == "bicycle")

    print("----")
    testing.test(del_all_sub_str("an", "banana") == "ba")
    testing.test(del_all_sub_str("cyc", "bicycle") == "bile")
    testing.test(del_all_sub_str("iss", "Mississippi") == "Mippi")
    testing.test(del_all_sub_str("eggs", "bicycle") == "bicycle")
Example #37
        """ Delete all messages from inbox. """
        self.sms_list.clear()

my_inbox = SMS_store()

sms1 = (555123456, "06-07-2020, 12:00", "This is your testing message.")
sms2 = (513123955, "10-07-2020, 07:00", "That's another message.")
sms3 = (799185333, "08-06-2020, 15:15", "That message has been read.")

# Add three SMS messages to the store
my_inbox.add_new_arrival(*sms1)
my_inbox.add_new_arrival(*sms2)
my_inbox.add_new_arrival(*sms3)

# Check for unread SMS messages.
test(my_inbox.get_unread_indexes() == [0,1,2])

# Check for number of SMS messages in the store.
test(my_inbox.message_count() == 3)

# Get some existing and non-existing SMS messages.
test(my_inbox.get_message(3) == None)
test(my_inbox.get_message(0)[0] == sms1[0])
test(my_inbox.get_message(5) == None)

# Check whether the SMS message that we have read has changed the state.
test(my_inbox.get_unread_indexes() == [1,2])

# Delete 1st SMS message and check whether the 1st message is still the same.
# Check count.
my_inbox.delete(0)
Example #38
                # Print the loss averaged over the last N mini-batches
                N_minibatch_loss /= N
                with open(trace_file, 'a+') as f:
                    f.write(
                        str(epoch + 1) + ',' + str(minibatch_count) + ',' +
                        str(N_minibatch_loss) + '\n')

                # Add the averaged loss over N minibatches and reset the counter
                avg_minibatch_loss.append(N_minibatch_loss)
                N_minibatch_loss = 0.0

            # validate every 6 N minibatches, as validation is more expensive now.
            if minibatch_count % (6 * N) == 0 and minibatch_count != 0:

                # validation
                losses, val_class, val_agg, conf = testing.test(
                    model, computing_device, val_loader, criterion)
                if losses[0] < current_best_val:
                    current_best_val = losses[0]
                    best_params = model.state_dict()
                    increasing_epochs = 0
                else:
                    increasing_epochs += 1
                testing.write_results(val_file, losses)
                testing.write_results(val_class_file, val_class)
                testing.write_results(val_agg_file, val_agg)
                torch.save(
                    conf, val_file + '-conf-' + str(epoch) + '-' +
                    str(minibatch_count))
        if increasing_epochs > early_stop_epochs:
            break
Example #39
    if char == "a":
        count += 1
print(count)

in a function named count_letters, and generalize it so that it accepts the string and the letter as arguments. Make the function return the number of characters, rather than print the answer. The caller should do the printing."""

def count_letters(strgiv, letter):
    count = 0
    for char in strgiv:
        if char == letter:
            count +=1
    return count

aaa = "banana is a fruit."

test(count_letters(aaa, "a")== 4)

#Exercise 4
"""Now rewrite the count_letters function so that instead of traversing the string, it repeatedly calls the find method, with the optional third parameter to locate new occurrences of the letter being counted."""

def count_letter2(strgiv, letter):
    count = 0
    result = strgiv.find(letter, 0)
    while result != -1:
        result = strgiv.find(letter, result+1)
        count +=1
    return count
test(count_letter2(aaa, "a") == 4)

#Exercise 5
"""Assign to a variable in your program a triple-quoted string that contains your favourite paragraph of text — perhaps a poem, a speech, instructions to bake a cake, some inspirational verses, etc.
Example #40
from testing import test
import string


def myreplace(old, new, s):
    """ Replace all occurrences of old with new in s. """
    if old in string.whitespace:
        return new.join(s.split())
    else:
        return new.join(s.split(old))


test(
    myreplace(",", ";", "this, that, and some other thing") ==
    "this; that; and some other thing")
test(
    myreplace(" ", "**", "Words will now      be  separated by stars.") ==
    "Words**will**now**be**separated**by**stars.")
Example #41
#!/usr/bin/env python3
from testing import test


def recursive_min(indata):
    smallest = None
    first_time = True

    for elem in indata:
        if type(elem) == type([]):
            elem = recursive_min(elem)

        if first_time and smallest == None:
            smallest = elem
            first_time = False

        if elem < smallest:
            smallest = elem

    return smallest


test(recursive_min([2, 9, [1, 13], 8, 6]) == 1)
test(recursive_min([2, [[100, 1], 90], [10, 13], 8, 6]) == 1)
test(recursive_min([2, [[13, -7], 90], [1, 100], 8, 6]) == -7)
test(recursive_min([[[-13, 7], 90], 2, [1, 100], 8, 6]) == -13)
Example #42
                    f.write(
                        str(epoch + 1) + ',' + str(minibatch_count) + ',' +
                        str(N_minibatch_loss) + '\n')

                # Add the averaged loss over N minibatches and reset the counter
                avg_minibatch_loss.append(N_minibatch_loss)
                N_minibatch_loss = 0.0

            # validate every 4 N minibatches. as validation more expensive now.
            if minibatch_count % (4 * N) == 0 and minibatch_count != 0:

                # validation
                total_val_loss, avg_val_loss, accuracy, precision, recall, balance, \
                conf = testing.test(
                    model,
                    computing_device,
                    val_loader,
                    criterion)
                if total_val_loss < current_best_val:
                    current_best_val = total_val_loss
                    best_params = model.state_dict()
                    increasing_epochs = 0
                else:
                    increasing_epochs += 1
                with open(val_file, 'a+') as f1:
                    f1.write(
                        str(total_val_loss) + ',' + str(avg_val_loss) + ',' +
                        str(accuracy) + ',' + str(precision) + ',' +
                        str(recall) + ',' + str(balance) + '\n')
                torch.save(
                    conf, val_file + '-conf' + str(epoch) + '-' +
Example #43
    def train(self):
        ''' Network '''
        batch_pred_feats, batch_pred_coords, self.final_state = self.LSTM('lstm', self.x, self.istate)

        ''' Loss: L2 '''
        loss = tf.reduce_mean(tf.square(self.y - batch_pred_coords)) * 100

        ''' regularization term: L2 '''
        regularization_term = tf.reduce_mean(tf.square(self.x[:, self.nsteps-1, 0:4096] - batch_pred_feats)) * 100

        ''' Optimizer '''
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss  + self.lamda * regularization_term) # Adam Optimizer

        ''' Summary for tensorboard analysis '''
        dataset_loss = -1
        dataset_loss_best = 100
        test_writer = tf.train.SummaryWriter('summary/test')
        tf.scalar_summary('dataset_loss', dataset_loss)
        summary_op = tf.merge_all_summaries()

        ''' Initializing the variables '''
        init = tf.initialize_all_variables()
        self.saver = tf.train.Saver()
        batch_states = np.zeros((self.batchsize, 2*self.len_vec))

        ''' Launch the graph '''
        with tf.Session() as sess:
            if self.restore_weights == True and os.path.isfile(self.rolo_current_save):
                sess.run(init)
                self.saver.restore(sess, self.rolo_current_save)
                print("Weight loaded, finetuning")
            else:
                sess.run(init)
                print("Training from scratch")

            self.load_training_list()

            for self.iter_id in range(self.n_iters):
                ''' Load training data & ground truth '''
                batch_id = self.iter_id - self.batch_offset
                [batch_vecs, batch_seq_names, batch_frame_ids] = self.load_batch(batch_id)
                if batch_vecs is False: continue

                batch_xs = batch_vecs
                batch_ys = batchload_gt_decimal_coords_from_VID(self.dataset_annotation_folder_path,
                                                                batch_seq_names,
                                                                batch_frame_ids,
                                                                offset = self.nsteps - 1)
                if batch_ys is False: continue

                ''' Reshape data '''
                batch_xs = np.reshape(batch_xs, [self.batchsize, self.nsteps, self.len_vec])
                batch_ys = np.reshape(batch_ys, [self.batchsize, 4])

                ''' Update weights by back-propagation '''
                sess.run(optimizer, feed_dict={self.x: batch_xs,
                                               self.y: batch_ys,
                                               self.istate: batch_states})

                if self.iter_id % self.display_step == 0:
                    ''' Calculate batch loss '''
                    batch_loss = sess.run(loss,
                                          feed_dict={self.x: batch_xs,
                                                     self.y: batch_ys,
                                                     self.istate: batch_states})
                    print("Batch loss for iteration %d: %.3f" % (self.iter_id, batch_loss))

                if self.display_regu is True:
                    ''' Calculate regularization term '''
                    batch_regularization = sess.run(regularization_term,
                                                    feed_dict={self.x: batch_xs,
                                                               self.y: batch_ys,
                                                               self.istate: batch_states})
                    print("Batch regu for iteration %d: %.3f" % (self.iter_id, batch_regularization))

                if self.display_coords is True:
                    ''' Calculate predicted coordinates '''
                    coords_predict = sess.run(batch_pred_coords,
                                              feed_dict={self.x: batch_xs,
                                                         self.y: batch_ys,
                                                         self.istate: batch_states})
                    print("predicted coords:" + str(coords_predict[0]))
                    print("ground truth coords:" + str(batch_ys[0]))

                ''' Save model '''
                if self.iter_id % self.save_step == 1:
                    self.saver.save(sess, self.rolo_current_save)
                    print("\n Model saved in file: %s" % self.rolo_current_save)

                ''' Validation '''
                if self.validate == True and self.iter_id % self.validate_step == 0:
                    dataset_loss = test(self, sess, loss, batch_pred_coords)

                    ''' Early-stop regularization '''
                    if dataset_loss <= dataset_loss_best:
                        dataset_loss_best = dataset_loss
                        self.saver.save(sess, self.rolo_weights_file)
                        print("\n Better Model saved in file: %s" % self.rolo_weights_file)

                    ''' Write summary for tensorboard '''
                    summary = sess.run(summary_op, feed_dict={self.x: batch_xs,
                                                              self.y: batch_ys,
                                                              self.istate: batch_states})
                    test_writer.add_summary(summary, self.iter_id)
        return