Example #1
def train_agent(config, agent):
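    # Train the given agent in a Neodroid environment and persist the resulting
    # model and running statistics; `torch`, `neo`, `U`, and `C` are assumed to be
    # imported at module level in the surrounding project.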
    device = torch.device('cuda' if config.USE_CUDA else 'cpu')
    torch.manual_seed(config.SEED)

    env = neo.NeodroidFormalWrapper(
        environment_name=config.ENVIRONMENT_NAME,
        connect_to_running=config.CONNECT_TO_RUNNING)
    env.seed(config.SEED)

    agent.build(env, device)

    listener = U.add_early_stopping_key_combination(agent.stop_training)

    listener.start()
    try:
        _trained_model, running_signals, running_lengths, *training_statistics = agent.train(
            env, config.ROLLOUTS, render=config.RENDER_ENVIRONMENT)
    finally:
        listener.stop()

    U.save_statistic(running_signals,
                     'running_signals',
                     LOG_DIRECTORY=C.LOG_DIRECTORY)
    U.save_statistic(running_lengths,
                     'running_lengths',
                     LOG_DIRECTORY=C.LOG_DIRECTORY)
    U.save_model(_trained_model, config)

    env.close()
Example #2
def test_ddpg_agent(config):
  '''Train a DDPGAgent on a gym environment and save the resulting actor and critic models.'''
  import gym

  device = torch.device('cuda' if config.USE_CUDA else 'cpu')

  env = gym.make(config.ENVIRONMENT_NAME)
  # env = NormaliseActionsWrapper(env)
  # env = neo.make('satellite',connect_to_running=False)

  agent = DDPGAgent(config)
  agent.build(env, device)
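  # register a key combination that triggers agent.stop_training so a run can be interrupted early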
  listener = U.add_early_stopping_key_combination(agent.stop_training)

  listener.start()
  try:
    (actor_model, critic_model), stats = agent.train(
        env, config.ROLLOUTS, render=config.RENDER_ENVIRONMENT
        )
  finally:
    listener.stop()

  U.save_model(actor_model, config, name='actor')
  U.save_model(critic_model, config, name='critic')
Example #3
def train_agent(config, agent):
    device = torch.device('cuda' if config.USE_CUDA else 'cpu')
    torch.manual_seed(config.SEED)

    env = U.BinaryActionEncodingWrapper(
        environment_name=config.ENVIRONMENT_NAME,
        connect_to_running=config.CONNECT_TO_RUNNING)
    env.seed(config.SEED)

    agent.build(env, device)

    listener = U.add_early_stopping_key_combination(agent.stop_training)

    listener.start()
    try:
        _trained_model, running_signals, running_lengths, *training_statistics = agent.train(
            env, config.ROLLOUTS, render=config.RENDER_ENVIRONMENT)
    except ValueError:
        running_signals = None
        running_lengths = None
        _trained_model = None
        print('Training procedure did not return as expected')
    finally:
        listener.stop()

    # only persist statistics and the model if training completed
    if _trained_model is not None:
        U.save_statistic(running_signals,
                         'running_signals',
                         LOG_DIRECTORY=C.LOG_DIRECTORY)
        U.save_statistic(running_lengths,
                         'running_lengths',
                         LOG_DIRECTORY=C.LOG_DIRECTORY)
        U.save_model(_trained_model, config)

    env.close()
Example #4
def train(event, context):
    """Trains our ML model."""
    utilities.download_directory(uri=f"s3://{DATA_BUCKET}/{DATA_PREFIX}",
                                 dst=DATA_TMP_DST)
    df = utilities.read_csv_directory(DATA_TMP_DST)
    print(f"SHAPE: {df.shape}")

    train, test = utilities.train_test_split(df)
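    # separate features from the 'y' target column in each split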
    X_train, y_train = utilities.Xy_split(train, target='y')
    X_test, y_test = utilities.Xy_split(test, target='y')

    X_train = utilities.preprocessing.preprocess(X_train)
    X_test = utilities.preprocessing.preprocess(X_test)

    model = utilities.Model()
    model.fit(X_train, y_train)

    y_hat = model.predict(X_test)
    eval_results = utilities.evaluate(y_actual=y_test, y_predict=y_hat)

    utilities.save_model(obj=model, uri=f"s3://{DATA_BUCKET}/{MODEL_PREFIX}")
    return {
        "status": "success",
        "results": eval_results,
    }
Example #5
def main():
    dataset = "context_finetuned"
    # dataset = "context_pretrained"
    data = getattr(utilities, f"read_{dataset}")()

    data["vocab"] = sorted(
        list(
            set([
                w for sent in data["train_sen"] + data["dev_sen"] +
                data["test_sen"] for w in sent
            ])))
    data["concept"] = sorted(
        list(
            set([
                concept for seq in data["train_concept"] +
                data["dev_concept"] + data["test_concept"] for concept in seq
            ])))
    data["classes"] = sorted(list(set(data["train_class"])))
    data["word_to_idx"] = {w: i for i, w in enumerate(data["vocab"])}
    data["idx_to_word"] = {i: w for i, w in enumerate(data["vocab"])}
    data["concept_to_idx"] = {w: i for i, w in enumerate(data["concept"])}
    data["idx_to_concept"] = {i: w for i, w in enumerate(data["concept"])}
    MAX_SENT_LEN = max([
        len(sent)
        for sent in data["train_sen"] + data["dev_sen"] + data["test_sen"]
    ])
    listFilter = [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23],
                  [27, 28, 29]]
    FILTER_NUM = [100, 100, 100]
    FILTER_PRE = 3
    FILTER_NUM_PRE = 100
    listModality = ["N", "D", "ND", "NK", "DK", "NDK"]
    for modality in listModality:
        for FILTERS in listFilter:
            params = {
                "MODALITY": modality,
                "DATASET": dataset,
                "SAVE_MODEL": True,
                "EARLY_STOPPING": True,
                "EPOCH": 500,
                "LEARNING_RATE": 0.001,
                "MAX_SENT_LEN": MAX_SENT_LEN,
                "BATCH_SIZE": 50,
                "WORD_DIM": 200,
                "VOCAB_SIZE": len(data["vocab"]),
                "CLASS_SIZE": len(data["classes"]),
                "CONCEPT_SIZE": len(data["concept"]),
                "FILTER_PRE": FILTER_PRE,
                "FILTER_NUM_PRE": FILTER_NUM_PRE,
                "FILTERS": FILTERS,
                "FILTER_NUM": FILTER_NUM,
                "DROPOUT_PROB": 0.5,
                "NORM_LIMIT": 3,
                "OPTIMIZATION": "adam"
            }
            model, dev_acc, dev_acc_each, listLog = train(data, params)
            if params["SAVE_MODEL"]:
                utilities.save_model(model, params)
            torch.cuda.empty_cache()
Example #6
def main(args: Optional[argparse.Namespace] = None):
    return_value = True
    if args is None:
        args = parse_arguments()
        return_value = False

    caffe_model_path = os.path.join(args.model_dir, args.caffe_model)
    pytorch_model_out_path = os.path.join(args.model_dir, args.out_name)
    tmp_path = os.path.join(args.model_dir, 'temp.pt')

    print('Converting normalized Caffe weights to a PyTorch state_dict...')
    caffe_args = SimpleNamespace(model_caffemodel=caffe_model_path,
                                 output_path=tmp_path,
                                 caffe_proto='')
    caffemodel2pytorch.main(caffe_args)
    state_dict = torch.load(tmp_path)

    # reshape caffe bias to match the PyTorch one
    for learnables_key in state_dict:
        if 'bias' in learnables_key:
            state_dict[learnables_key] = state_dict[learnables_key].squeeze()

    print('Loading VGG19 PyTorch model...')

    # only features are needed
    net = torchvision.models.vgg19(pretrained=True).features

    # rename VGG19's feature layers, so that we can refer to them easily later
    new_layer_names = [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
        'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
        'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
        'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
        'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4',
        'pool5'
    ]
    net = torch.nn.Sequential(OrderedDict(zip(new_layer_names, net)))

    # replace max pooling by average pooling
    for i in range(len(net)):
        if isinstance(net[i], torch.nn.MaxPool2d):
            net[i] = torch.nn.AvgPool2d(2, stride=2)

    print('Loading normalized weights into the PyTorch model...')
    net.load_state_dict(state_dict)

    # remove the intermediate file
    os.remove(tmp_path)

    if return_value:
        return net
    else:
        print('Saving the converted model..')
        utilities.save_model(net, pytorch_model_out_path)
        return None
Example #7
def main():
    print("Reading in the training data")
    train = util.get_train_df()

    # clean data

    # print("Extracting features and training model")
    # classifier = get_pipeline()
    classifier = RandomForestClassifier(n_estimators=100)

    # the first column holds the label; the remaining columns are the features
    classifier.fit(train[0::, 1::], train[0::, 0])
    # print("Saving the classifier")
    util.save_model(classifier)
Example #8
def regular_train_agent_procedure(agent_type, config, environment=None):
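    # Construct (or wrap) the environment if one was not supplied, seed everything,
    # train the agent with an early-stopping key listener, then save the resulting
    # model(s) and the collected statistics before closing the environment.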
    if not environment:
        if '-v' in config.ENVIRONMENT_NAME:
            environment = gym.make(config.ENVIRONMENT_NAME)
        else:
            environment = BinaryActionEncodingWrapper(
                name=config.ENVIRONMENT_NAME,
                connect_to_running=config.CONNECT_TO_RUNNING)

    U.set_seeds(config.SEED)
    environment.seed(config.SEED)

    agent = agent_type(config)
    device = torch.device('cuda' if config.USE_CUDA else 'cpu')
    agent.build(environment, device)

    listener = U.add_early_stopping_key_combination(agent.stop_training)

    listener.start()
    try:
        models, stats = agent.train(environment,
                                    config.ROLLOUTS,
                                    render=config.RENDER_ENVIRONMENT)
    finally:
        listener.stop()

    identifier = count()
    if isinstance(models, list):
        for model in models:
            U.save_model(model,
                         config,
                         name=f'{type(agent).__name__}-{next(identifier)}')
    else:
        U.save_model(models,
                     config,
                     name=f'{type(agent).__name__}-{next(identifier)}')

    stats.save()

    environment.close()
Example #9
def main():
    """
    This is the main program
    """

    # Initiating variables with parsed command line arguments

    in_arg = get_input_args()

    data_dir = in_arg.data_dir
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'
    save_dir = in_arg.save_dir
    arch = in_arg.arch.lower()
    epochs = in_arg.epochs
    hidden_units = in_arg.hidden_units
    learning_rate = in_arg.learning_rate
    gpu = in_arg.gpu

    # Correct the variables if necessary to avoid incorrect calculations
    # and collect messages recording which variables were changed and to what values

    error_messages = []

    if (epochs <= 0):
        epochs = 1
        error_messages.append("epochs was corrected to 1")
    elif (epochs > 10):
        epochs = 10
        error_messages.append("epochs was corrected to 10")

    if (learning_rate <= 0.000001):
        learning_rate = 0.00001
        error_messages.append("learning_rate was corrected to 0.00001")
    elif (learning_rate >= 0.1):
        learning_rate = 0.01
        error_messages.append("learning_rate was corrected to 0.01")

    if (hidden_units < 4):
        hidden_units = 4
        error_messages.append("hidden_units was corrected to 4")

    if not save_dir:
        save_dir = os.getcwd()
        save = False
    elif save_dir == "/":  # a slash means the newly trained classifier should be stored in the current directory
        save = True
        save_dir = os.getcwd()
    else:
        save = True


    if path.exists(data_dir) and path.exists(train_dir) and path.exists(valid_dir) \
    and path.exists(test_dir) and path.exists(save_dir): # check if all paths are correct

        if arch in ("alexnet", "vgg16", "densenet161"):  # check if the stated architecture is supported

            # define the data transforms of the train data
            data_transforms_train = transforms.Compose([
                transforms.RandomRotation(30),
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ])

            # define the data transforms of the validation data
            data_transforms_valid = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ])

            # load the image data of the train and validation set and perform transforms
            train_data = datasets.ImageFolder(train_dir,
                                              transform=data_transforms_train)
            valid_data = datasets.ImageFolder(valid_dir,
                                              transform=data_transforms_valid)

            # load the transformed image data into loader variables by batches
            trainloader = DataLoader(train_data, batch_size=64, shuffle=True)
            validloader = DataLoader(valid_data, batch_size=64, shuffle=True)

            # download the pretrained version of the selected model defined by the parser variable 'arch'
            model = getattr(models, arch)
            model = model(pretrained=True)

            # freeze the parameters of the model as only classifier will be updated
            for param in model.parameters():
                param.requires_grad = False

            # classifier layer will be fully connected so get the input units first
            if (arch == "vgg16"):
                num_in_features = model.classifier[0].in_features
            elif (arch == "densenet161"):
                num_in_features = model.classifier.in_features
            elif (arch == "alexnet"):
                num_in_features = model.classifier[1].in_features

            # define the new classifier und replace the current one
            new_classifier = nn.Sequential(
                nn.Linear(num_in_features, hidden_units), nn.ReLU(),
                nn.Dropout(0.5), nn.Linear(hidden_units,
                                           int(hidden_units / 4)), nn.ReLU(),
                nn.Dropout(0.5), nn.Linear(int(hidden_units / 4), 102),
                nn.LogSoftmax(dim=1))

            model.classifier = new_classifier

            # use the GPU if available and move the model to it
            if gpu:
                device = torch.device(
                    "cuda" if torch.cuda.is_available() else "cpu")
            else:
                device = "cpu"

            model.to(device)

            # define the loss function and the optimizer
            criterion = nn.NLLLoss()
            optimizer = optim.Adam(model.classifier.parameters(),
                                   lr=learning_rate)

            print_every = 50
            train_len = len(trainloader)
            valid_len = len(validloader)

            # start training and validation epochs
            for epoch in range(epochs):
                epoch += 1
                last_print = 0
                running_loss = 0

                for batch, (inputs, labels) in enumerate(trainloader):
                    batch += 1

                    inputs, labels = inputs.to(device), labels.to(device)

                    # for each batch gradients should be zeroed
                    optimizer.zero_grad()

                    # perform feed-forward and calculate loss through back propagation
                    logps = model.forward(inputs)
                    loss = criterion(logps, labels)
                    loss.backward()
                    optimizer.step()

                    running_loss += loss.item()

                    print(f"Epoch {epoch}/{epochs}, batch {batch}/{train_len}")

                    # do validation on the test set after defined set of batches

                    if (batch % print_every == 0 and batch >= print_every) or (batch == train_len):
                        valid_loss = 0
                        accuracy = 0

                        # put into evaluation mode and deactivate gradients for feed-forward validation
                        model.eval()
                        with torch.no_grad():
                            # iterate through the valid data set
                            for inputs, labels in validloader:
                                inputs, labels = inputs.to(device), labels.to(
                                    device)
                                logps = model.forward(inputs)

                                # calculate the losses
                                batch_loss = criterion(logps, labels)
                                valid_loss += batch_loss.item()

                                # calculate accuracy and return the category with the top probability
                                # then compare with the labels and calculate the mean of the right matches
                                ps = torch.exp(logps)
                                top_p, top_class = ps.topk(1, dim=1)
                                equals = top_class == labels.view(
                                    *top_class.shape)
                                accuracy += torch.mean(
                                    equals.type(torch.FloatTensor)).item()

                        # after each training/validation cycle, print statistics; the train
                        # loss is averaged over the batches since the last printout
                        if batch == train_len:
                            print(
                                f"Train loss: {running_loss/(train_len - last_print):.3f}.. ",
                                end='')
                        else:
                            last_print += print_every
                            print(
                                f"Train loss: {running_loss/print_every:.3f}.. ",
                                end='')

                        print(f"Valid loss: {valid_loss/valid_len:.3f}.. ",
                              end='')

                        print(f"Accuracy: {accuracy/valid_len:.3f}")
                        running_loss = 0

                        # switch back to train mode
                        model.train()

            # if save path is defined
            if save:
                save_model(model, save_dir, train_data.class_to_idx, arch,
                           new_classifier)

        else:
            print("Architecture chosen not supported!")

    else:
        print("Incorrect directories - please check!")

    # print out error messages if any
    if error_messages:
        for v in error_messages:
            print(v)
Example #10
logging.info("*** Describe training data ***")
ut.describe_data(X_train, Y_train)
logging.info("*** Describe validation data ***")
ut.describe_data(X_val, Y_val)

## Create model
tstart = time.time()
Y_train_c = to_categorical(Y_train, num_classes=2)
Y_val_c = to_categorical(Y_val, num_classes=2)
if args.model_name == "mnist":
    model, history = mtt.mnist_cnn(X_train,
                                   Y_train_c,
                                   X_val,
                                   Y_val_c,
                                   batch_size=128,
                                   epochs=int(args.epochs))
elif args.model_name == "too_simple":
    model, history = mtt.too_simple(X_train,
                                    Y_train_c,
                                    X_val,
                                    Y_val_c,
                                    batch_size=128,
                                    epochs=int(args.epochs))
tend = time.time()
logging.info("Time to fit model: {0:.1f} s.".format(tend - tstart))

## Save model
ut.save_model(args.save_dir, model, history, X_val, Y_val_c)

logging.info("Finished cnn.py")
Example #11
 def save(self, C):
     U.save_model(self._policy, C)
Example #12
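            # fragment of a TensorFlow training loop: log the train accuracy each epoch,
            # abort if the model is not learning, and checkpoint via save_model() whenever
            # the test accuracy improves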
            train_accuracy = sess.run(acc, feed_dict=feed_dict_train)
            # train_accuracy, summary = sess.run([acc_1, merged_train], feed_dict=feed_dict_train)
            # writer.add_summary(summary, epoch)

            print_and_log_timestamp("epoch {}/{}: train accuracy is {}",
                                    epoch + 1, NUM_EPOCHS, train_accuracy)
            if train_accuracy < MIN_ACCURACY_DISCARD and epoch >= MIN_EPOCHS_DISCARD:
                print_and_log_timestamp("model is not learning...exit")
                exit(1)
            if train_accuracy > best_test_accuracy:
                test_accuracy, test_summary = sess.run(
                    [acc, merged_test], feed_dict=feed_dict_test)
                writer.add_summary(test_summary, epoch)
                print_and_log_timestamp("epoch {}/{}: test accuracy is {}",
                                        epoch + 1, NUM_EPOCHS, test_accuracy)
                if test_accuracy > best_test_accuracy:
                    best_test_accuracy = test_accuracy  # updating best test accuracy
                    save_model(saver, sess)  # save model
                if train_accuracy - test_accuracy > DROPOUT_UPDATE_MIN_GAP and train_dropout > MIN_DROPOUT:
                    train_dropout = train_dropout * DROPOUT_UPDATE_FACTOR  # updating dropout
                    print_and_log_timestamp(
                        "**** updating dropout value to {} ****",
                        train_dropout)

        writer.close()
    print_and_log('\n')

print_and_log_timestamp(" END session! {}",
                        datetime.datetime.now().strftime("%H:%M:%S"))
copyfile(CM.TEMP_LOG_FILE_PATH, CM.LOG_FILE_PATH)
Example #13
 def save(self, C):
   U.save_model(self._actor, C, 'actor')
   U.save_model(self._critic, C, 'critic')
Example #14
    return -1.0

try:
#def calculate():
    if cuda.is_available():
        print('cuda is available!')

    # Build the model
  #  self.conv1=nn.DataParallel(self.conv1)
    encoder=torch.load('model8/encoder')
 #   encoder=EncoderRNN()
   # encoder =nn.DataParallel(encoder)
  #  decoder = DecoderRNN()
    decoder=torch.load('model8/decoder')
    if cuda.is_available():
        encoder.cuda()
        decoder.cuda()
#        encoder = nn.DataParallel(encoder)
        #encoder=nn.DataParallel(encoder)
#        decoder=nn.DataParallel(decoder,dim=1,device_ids=[0,1])
    train_iters(encoder, decoder)

    save_model(encoder, decoder)
    
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
except Exception as e:  # if an error happens during training, save the model anyway
    print(e)
    save_model(encoder, decoder)
#calculate()
Example #15
    n_classes = 10

    epochs = 20  # int(input("Epochs: "))
    batch_size = 1000  # int(input("batch_size: "))
    lr = 0.01  # float(input("lr: "))

    model = Classifier(input_shape, conv, fc, n_classes).to(device)
    criterion = nn.CrossEntropyLoss().to(device)

    # optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    optimizer = optim.Adagrad(model.parameters(), lr=lr)

    tr.train(model, dataset, criterion, optimizer, device, epochs, batch_size)

    print("Saving Black Box:")
    utilities.save_model(model)

if args['sub']:
    bb = input("Black box model name: ")

    oracle_model = torch.load("saved_models/" + bb)

    test_dataset = dataset["eval"]
    dataset = dataset["train"]

    num_points = 150  # int(input("Num Points Initial: "))
    idxs = []
    for _ in range(num_points):
        while True:
            idx = random.randint(0, len(dataset) - 1)
            if idx not in idxs:
Example #16
 def save(self, C):
     U.save_model(self._actor_critic, C)
Example #17
 def save(self, C):
   U.save_model(self._value_model, C)
Example #18
def main():
    #print(f'datetime.datetime={str(datetime.datetime)}, datetime.date={str(datetime.date)}, strftime():{datetime.datetime.now().strftime("%Y%d%H%M%S")}')
    thrash = True
    print('tf version:{0}'.format(tf.VERSION))
    print('tf.keras version:{0}'.format(tf.keras.__version__))
    start_time = datetime.datetime.now().strftime("%Y%d%H%M%S")
    flags, unparsed = ut.parseArgs()
    print(flags)
    SAMPLE_FILE = flags.train_data_path + flags.sample + '.' + flags.img_file_extension
    img = ut.read_image(filename=SAMPLE_FILE, show=False)
    img = np.array(img)
    if thrash:
        img = ut.thrash_img(img)

    IMG_SHAPE=img.shape
    (x_train, y_train), (x_test, y_test)=ut.load_data(numclasses=flags.numclasses, train_path=flags.train_data_path, test_path=flags.test_data_path, onehot=True, extension=flags.img_file_extension)

    print('IMG_SHAPE:{0},  y_train shape:{1}'.format(IMG_SHAPE,y_train[0].shape))

    if flags.load_model:
        model = ut.load_stored_model(name=flags.model_dir + flags.model_name)
    elif flags.model == 'dense':
        model = ut.make_dense_model(flags=flags)
    elif flags.model  == 'conv2d':
        model = ut.make_convnet_model(flags=flags, shape=IMG_SHAPE)
    else:
        print('No model, no hope. Quitting...')
        return

    if flags.load_data:
        model = ut.load_stored_data(model=model, date_file_name=flags.data_dir + flags.data_name)

    print('Saving in {0}'.format(flags.tb_dir + start_time))
    tensorboard = TensorBoard(log_dir=flags.tb_dir + '{0}'.format(start_time))

    adam=tf.keras.optimizers.Adam(lr=flags.learning_rate)

    model.compile(optimizer=adam,
                  loss=flags.loss,
                  metrics=[flags.metric]
                  )

    if flags.train:
        print('Training...')
        scores = []
        for epoch in range(flags.epochs):
            print('Epoch:{0} of {1}'.format(epoch+1, flags.epochs))
            n = len(x_train)
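            # iterate over the training set in chunks of batch_size, loading and
            # preprocessing each image from disk before fitting on the batch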
            for batch in range(0,len(x_train), flags.batch_size):
                print('Batch {0} of {1}, epoch {2} of {3}.'.format(batch+1,n+1, epoch+1, flags.epochs))
                bunch_x, bunch_y = x_train[batch:batch+flags.batch_size], y_train[batch:batch+flags.batch_size]
                if len(bunch_x) < flags.batch_size: # skip partial batches
                    print('Skipping {0} samples..'.format(len(bunch_x)))
                    continue

                xs = []
                ys = []
                print("Iterating {0} samples".format(len(bunch_x)))
                for datum in range(len(bunch_x)):
                    file = bunch_x[datum]
                    img = ut.read_image(filename=flags.train_data_path+file, show=False)
                    img=np.array(img)
                    if thrash:
                        img = ut.thrash_img(img)
                    xs.append(img)
                    ys.append(bunch_y[datum])

                X= np.stack(xs, axis=0)
                Y= np.stack(ys, axis=0)

                score_before = model.evaluate(x=X,y=Y, batch_size=flags.batch_size)

                _ = model.fit(x=X, y=Y, shuffle=flags.shuffle, callbacks=[tensorboard])

                score_after = model.evaluate(x=X,y=Y, batch_size=flags.batch_size)

                if score_before == score_after:
                    print("Scores before and after training are identical")

                scores.append(score_after)
                if epoch == 0 and batch == 0:
                    model.summary()

                print('Score:{0}'.format(score_after))

            loss,acc = np.array([s[0] for s in scores]), np.array([s[1] for s in scores])
        print("Average loss:{0}  Average accuracy:{1}%".format(np.mean(loss), 100*np.mean(acc)))

    if flags.save_model:
        model_name = flags.model_name if flags.model_name is not None else start_time
        ut.save_model(model, flags.model_dir+model_name)
        print('Saved model to disk, json in {0}'.format(flags.model_dir + model_name + ".json"))

    if flags.save_data:
        data_name = flags.data_name if flags.data_name is not None else start_time
        model.save_weights(flags.data_dir + data_name + ".h5")
        print('Saved data to disk in {0}'.format(flags.model_dir + data_name + ".h5"))

    test_scores = []
    predictions = []
    if flags.evaluate or flags.predict:
        n = len(x_test)
        nTotal = 0
        sums_array = None
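        # sums_array accumulates the processed prediction vectors so they can be
        # averaged over all evaluated samples at the end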
        for batch in range(0, len(x_test), flags.batch_size):
            print('Batch {0} of {1}.'.format(batch+1, n+1))
            bunch_x, bunch_y = x_test[batch:batch + flags.batch_size], y_test[batch:batch + flags.batch_size]
            if len(bunch_x) < flags.batch_size:  # skip partial batches
                print('Skipping {0} samples..'.format(len(bunch_x)))
                continue

            xs = []
            ys = []
            for d in range(len(bunch_x)):
                file = bunch_x[d]
                img = ut.read_image(filename=flags.test_data_path + file, show=False)
                img = np.array(img)
                if thrash:
                    img = ut.thrash_img(img)
                xs.append(img)
                ys.append(bunch_y[d])

            X = np.stack(xs, axis=0)
            Y = np.stack(ys, axis=0)

            if flags.evaluate:
                score = model.evaluate(x=X, y=Y, batch_size=flags.batch_size)
                test_scores.append(score)
                print('Test score:{0}'.format(score))


            if flags.predict:
                prediction = model.predict(X, verbose=2)
                processed_predictions = ut.process_predictions(prediction, Y)

                for pp in processed_predictions:
                    if sums_array is None:
                        sums_array = np.zeros_like(pp)
                    sums_array = np.add(sums_array, pp)
                    nTotal = nTotal+1

                pass

        if flags.predict:
            sums_array /= nTotal

        if predictions != None:
            pass


        print('Average score:{0},{1}'.format(np.mean([test_scores[i][0] for i in range(len(test_scores))]),np.mean([test_scores[i][1] for i in range(len(test_scores))])))

        if flags.show_results:
            y_axis = np.arange(0, 1.0, 1.0/float(len(sums_array)))
            plt.plot(y_axis,sums_array)
            plt.show()

    pass