Code Example #1
File: main.py Project: victorialin898/petfinder
def main():
    # Set up the train and test directories in the layout flow_from_directory expects; aborts if they already exist
    create_sets(ARGS.data, train_ratio=0.9)

    # Our datasets here
    datasets = Datasets(ARGS.data)

    model = cnn()
    checkpoint_path = "./your_model_checkpoints/"

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Compile model graph
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=["sparse_categorical_accuracy"])

    if ARGS.evaluate:
        test(model, datasets)
    else:
        train(model, datasets, checkpoint_path)
Code Example #2
    def __init__(self, batch_size, model_name="PaintsTensorFlowDraftModel"):
        self.batch_size = batch_size
        self.data_sets = Datasets(self.batch_size)
        self.model_name = model_name
        # utils.initdir(self.model_name)

        self.global_steps = tf.compat.v1.train.get_or_create_global_step()
        self.epochs = tf.Variable(0, trainable=False, dtype=tf.int32)

        self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.5, beta_2=0.9)
        self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.5, beta_2=0.9)

        self.generator = Generator(name="PaintsTensorFlowDraftNet")  # fresh network for a first training run
        # to resume training from a saved model instead, use:
        # self.generator = tf.keras.models.load_model(__SAVED_MODEL_PATH__)
        
        self.discriminator = Discriminator()
Code Example #3
def main():
    datasets = Datasets("data/imagenet")
    model = Model()
    model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 1)))
    model.summary()

    model.compile(optimizer=model.optimizer, loss=model.loss_fn)

    train(model, datasets)
Code Example #4
def main():
    """ Main function. """

    checkpoint_path = "./saved_models/"
    model_final = None
    #makes checkpoint folder if it doesn't exist
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    
    #Loading model if user requested it
    if ARGS.load_checkpoint is not None:
        if ARGS.load_checkpoint.endswith('.h5') and os.path.isfile(ARGS.load_checkpoint):
            print("Found an existing model! Loading it...")
            model_final = tf.keras.models.load_model(ARGS.load_checkpoint)
            model_final.summary()
        else:
            print("Error: pass in an .h5 file of the model!")
            return
    else:
        ### Load the data
        datasets = Datasets(ARGS.data)

        vggmodel = VGG16(weights='imagenet', include_top=True)
        vggmodel.summary()   

        ### Freeze the first 15 layers of vggmodel
        for layer in vggmodel.layers[:15]:
            print(layer)
            layer.trainable = False

        X = vggmodel.layers[-2].output

        # A fully connected layer is added for predictions
        predictions = Dense(hp.num_classes, activation="softmax")(X)
        model_final = Model(inputs=vggmodel.input, outputs=predictions)
        opt = Adam(learning_rate=hp.learning_rate)

        model_final.compile(loss=keras.losses.categorical_crossentropy, optimizer=opt, metrics=["accuracy"])
        model_final.summary()
        
        # Set the training configuration and train the model; Model.fit handles generators directly in TF 2
        checkpoint = ModelCheckpoint(checkpoint_path + "rcnn_vgg16_1.h5", monitor='val_loss', verbose=1,
                                     save_best_only=True, save_weights_only=False, mode='auto')
        early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')

        model_final.fit(datasets.train_data, steps_per_epoch=10, epochs=1000,
                        validation_data=datasets.test_data, validation_steps=2,
                        callbacks=[checkpoint, early_stop])
Code Example #5
def main():
    """ Main function. """

    # This is for training
    time_now = datetime.now()
    timestamp = time_now.strftime("%m%d%y-%H%M%S")
    init_epoch = 0

    # If the paths provided by program arguments exist, this ensures their
    # absolute forms are used. If not, they will be resolved relative to
    # the directory of run.py (see the chdir below)
    if os.path.exists(ARGS.data):
        ARGS.data = os.path.abspath(ARGS.data)

    # Run script from location of run.py
    os.chdir(sys.path[0])

    datasets = Datasets(ARGS.data)
    model = YourModel()
    model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 3)))
    checkpoint_path = "checkpoints" + os.sep + \
        "your_model" + os.sep + timestamp + os.sep
    logs_path = "logs" + os.sep + "your_model" + \
        os.sep + timestamp + os.sep

    # Compile model graph
    model.compile(
        optimizer=model.optimizer,
        loss=model.loss_fn,
        metrics=["sparse_categorical_accuracy"])

    if ARGS.weights is None:
        # We will train model to obtain weights if we don't have weights
        # Print summary of model
        model.summary()
        # Make checkpoint directory if needed
        if not os.path.exists(checkpoint_path):
            os.makedirs(checkpoint_path)
        train(model, datasets, checkpoint_path, logs_path, init_epoch)
        evaluation = model.evaluate(x=datasets.test_data, verbose=1, batch_size=hp.batch_size)
        print(evaluation)
    else:
        model.load_weights(ARGS.weights, by_name=False)
        test(model)
Code Example #6
def main():
    """ Main function. """

    # Map datasets to its path
    datasets_path_dict = {
        'fer': os.getcwd() + '/../data/fer2013.csv'
    }

    datasets = Datasets(datasets_path_dict[ARGS.data], ARGS.data)

    model = Model()
    
    # Different model input size depending on the dataset. Default is fer2013.
    if ARGS.data == 'fer':
        model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 1)))

    checkpoint_path = "./your_model_checkpoints/"
    model.summary()

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Compile model graph
    model.compile(
        optimizer=model.optimizer,
        loss=model.loss_fn,
        metrics=["sparse_categorical_accuracy"])

    if not ARGS.evaluate:
        train(model, datasets, checkpoint_path)
    
    if ARGS.live:
        live(model)
    if ARGS.visualization:
        prediction_visualization(model, datasets)

    test(model, datasets)
Code Example #7
def main():
    """ Main function. """

    datasets = Datasets(ARGS.data, ARGS.task)

    if ARGS.task == '1':
        model = YourModel()
        model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 3)))
        checkpoint_path = "./your_model_checkpoints/"
        model.summary()
    else:
        model = VGGModel()
        checkpoint_path = "./vgg_model_checkpoints/"
        model(tf.keras.Input(shape=(224, 224, 3)))
        model.summary()

        # Don't load pretrained vgg if loading checkpoint
        if ARGS.load_checkpoint is None:
            model.load_weights(ARGS.load_vgg, by_name=True)

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Compile model graph
    model.compile(optimizer=model.optimizer,
                  loss=model.loss_fn,
                  metrics=["sparse_categorical_accuracy"])

    if ARGS.evaluate:
        test(model, datasets.test_data)
    else:
        train(model, datasets, checkpoint_path)

    model.save('my_model')
Code Example #8
class PaintsTensorFlowDraftModelTrain:
    def __init__(self, batch_size, model_name="PaintsTensorFlowDraftModel"):
        self.batch_size = batch_size
        self.data_sets = Datasets(self.batch_size)
        self.model_name = model_name
        # utils.initdir(self.model_name)

        self.global_steps = tf.compat.v1.train.get_or_create_global_step()
        self.epochs = tf.Variable(0, trainable=False, dtype=tf.int32)

        self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.5, beta_2=0.9)
        self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.5, beta_2=0.9)

        self.generator = Generator(name="PaintsTensorFlowDraftNet")  # fresh network for a first training run
        # to resume training from a saved model instead, use:
        # self.generator = tf.keras.models.load_model(__SAVED_MODEL_PATH__)
        
        self.discriminator = Discriminator()

    def __discriminator_loss(self, real, fake):
        SCE = tf.nn.sigmoid_cross_entropy_with_logits
        # reduce_mean turns the per-pixel losses into scalars so they can be summed and logged
        self.real_loss = tf.reduce_mean(SCE(labels=tf.ones_like(real), logits=real))
        self.fake_loss = tf.reduce_mean(SCE(labels=tf.zeros_like(fake), logits=fake))
        loss = self.real_loss + self.fake_loss
        return loss

    def __generator_loss(self, disOutput, output, target):
        SCE = tf.nn.sigmoid_cross_entropy_with_logits
        self.gan_loss = tf.reduce_mean(SCE(labels=tf.ones_like(disOutput), logits=disOutput))
        self.image_loss = tf.reduce_mean(tf.abs(target - output)) * l1_scaling  # L1 reconstruction term
        loss = self.image_loss + self.gan_loss
        return loss

    def __pred_image(self, model, image, line, hint, epoch=None):
        global_steps = self.global_steps.numpy()
        pred_image = model.predict([line, hint])

        # a hint filled with out-of-range values (2s), used here as the no-hint baseline
        zero_hint = tf.ones_like(hint) + 1
        pred_image_zero = model.predict([line, zero_hint])

        dis_fake = self.discriminator(pred_image, training=False)
        loss = self.__generator_loss(dis_fake, pred_image, image)


        loss = "{:0.05f}".format(loss).zfill(7)
        print("Epoch:{} GS:{} LOSS:{}".format(epoch, global_steps, loss))

        hint = np.array(hint)
        hint[hint > 1] = 1

        line_image = np.concatenate([line, line, line], -1)
        save_img = np.concatenate([line_image, hint, pred_image_zero, pred_image, image], 1)
        save_img = utils.convert2uint8(save_img)
        
    def training(self, image_path, line_path, loadEpochs=0):

        images, lines = self.data_sets.get_image_and_line(image_path, line_path)
        batch_steps = len(images) // self.batch_size  # integer division, used as a range() bound below

        def fast_fetch(size, train_data):
            # stack one batch of samples into a single float32 array of the requested shape
            x = np.zeros(size)
            if size == (self.batch_size, 128, 128, 3):  # 3-channel images and hints
                for i in range(self.batch_size):
                    x[i] = np.asarray(train_data[i], np.float32)
            else:  # single-channel line art
                for i in range(self.batch_size):
                    x[i, :, :, 0] = np.asarray(train_data[i], np.float32).reshape(128, 128)
            return x
            
        for epoch in range(loadEpochs):

            print("GS: ", self.global_steps.numpy(), "Epochs:  ", self.epochs.numpy())
            for batch in range(batch_steps):
                print('Batch no: {}'.format(batch), end='\r')
                batch_slice = slice(batch * self.batch_size, (batch + 1) * self.batch_size)
                trainData = self.data_sets._next(images[batch_slice], lines[batch_slice])
                image = tf.convert_to_tensor(fast_fetch((self.batch_size, 128, 128, 3), trainData[0]), dtype=tf.float32)
                line = tf.convert_to_tensor(fast_fetch((self.batch_size, 128, 128, 1), trainData[1]), dtype=tf.float32)
                hint = tf.convert_to_tensor(fast_fetch((self.batch_size, 128, 128, 3), trainData[2]), dtype=tf.float32)
                del trainData
                
                with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
                    pred_image = self.generator(inputs=[line, hint], training=True)
                    dis_real = self.discriminator(inputs=image, training=True)
                    dis_fake = self.discriminator(inputs=pred_image, training=True)
                    generator_loss = self.__generator_loss(dis_fake, pred_image, image)
                    discriminator_loss = self.__discriminator_loss(dis_real, dis_fake)
                discriminator_gradients = discTape.gradient(discriminator_loss, self.discriminator.trainable_variables)
                generator_gradients = genTape.gradient(generator_loss, self.generator.trainable_variables)
                self.discriminator_optimizer.apply_gradients(zip(discriminator_gradients, self.discriminator.trainable_variables))
                self.generator_optimizer.apply_gradients(zip(generator_gradients, self.generator.trainable_variables))
                gs = self.global_steps.numpy()
                del image, line, hint

            self.epochs.assign_add(1)  # keep epochs a tf.Variable (plain assignment would replace it with a Tensor)
            print('LOSS_G {}\nLOSS_G_Image {}\nLOSS_G_GAN {}\nLOSS_D {}\nLOSS_D_Real {}\nLOSS_D_Fake {}'.format(
                generator_loss, self.image_loss, self.gan_loss,
                discriminator_loss, self.real_loss, self.fake_loss))
        self.generator.summary()

    def save_model(self, saved_path):
        self.generator.save(saved_path, include_optimizer=False)  # for keras Model
Code Example #9
    from preprocess import Datasets
    import torch.utils.data
    from preprocess.transforms import SpectrogramTransform
    from preprocess.fusion import separate_sensors_collate
    from param import fs, duration_window, duration_overlap, spectro_batch_size

    spectrogram_transform = SpectrogramTransform(["Acc_norm", "Gyr_y"], fs, duration_window, duration_overlap,
                                                 spectro_batch_size, interpolation='linear', log_power=True, out_size=(48,48))
    collate_fn = separate_sensors_collate

    try:  # do not reload the datasets if they already exist
        train_dataset
        val_dataset

    except NameError:
        train_dataset = Datasets.SignalsDataSet(mode='train', split='balanced', comp_preprocess_first=True, transform=spectrogram_transform)
        val_dataset =   Datasets.SignalsDataSet(mode='val',   split='balanced', comp_preprocess_first=True, transform=spectrogram_transform)

    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, collate_fn=collate_fn, num_workers=0, shuffle=True)
    val_dataloader   = torch.utils.data.DataLoader(val_dataset,   batch_size=64, collate_fn=collate_fn, num_workers=0)

    str_result = ""


    model = DecorrelatedNet(input_shape=(1,48,48), signals_list=["Acc_norm", "Gyr_y"], loss_coef=0.1,
                            plot_conflict=True, cca_type='deep')

    model.to(device)
    model.adapt_CCA(train_dataloader)

    _, _, _, val_F1 = model.train_process(train_dataloader, val_dataloader, maxepochs=10)
Code Example #10
File: run.py Project: player-eric/CoverYourNose
def main():
    """ Main function. """

    time_now = datetime.now()
    timestamp = time_now.strftime("%m%d%y-%H%M%S")
    init_epoch = 0

    # If loading from a checkpoint, the loaded checkpoint's directory
    # will be used for future checkpoints
    if ARGS.load_checkpoint is not None:
        ARGS.load_checkpoint = os.path.abspath(ARGS.load_checkpoint)

        # Get timestamp and epoch from filename
        regex = r"(?:.+)(?:\.e)(\d+)(?:.+)(?:.h5)"
        init_epoch = int(re.match(regex, ARGS.load_checkpoint).group(1)) + 1
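        # e.g. a (hypothetical) checkpoint named "your_model.e011-acc0.8900.h5"
        # yields init_epoch = 12, so training resumes after epoch 11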
        timestamp = os.path.basename(os.path.dirname(ARGS.load_checkpoint))

    # If paths provided by program arguments are accurate, then this will
    # ensure they are used. If not, these directories/files will be
    # set relative to the directory of run.py
    if os.path.exists(ARGS.data):
        ARGS.data = os.path.abspath(ARGS.data)
    if os.path.exists(ARGS.load_vgg):
        ARGS.load_vgg = os.path.abspath(ARGS.load_vgg)

    # Run script from location of run.py
    os.chdir(sys.path[0])

    datasets = Datasets(ARGS.data, ARGS.task)

    if ARGS.task == '1':
        model = YourModel()
        model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 3)))
        checkpoint_path = "checkpoints" + os.sep + \
            "your_model" + os.sep + timestamp + os.sep
        logs_path = "logs" + os.sep + "your_model" + \
            os.sep + timestamp + os.sep

        # Print summary of model
        model.summary()
    elif ARGS.task == '2':
        model = VGGModel()
        checkpoint_path = "checkpoints" + os.sep + \
            "vgg_model" + os.sep + timestamp + os.sep
        logs_path = "logs" + os.sep + "vgg_model" + \
            os.sep + timestamp + os.sep
        model(tf.keras.Input(shape=(224, 224, 3)))

        # Print summaries for both parts of the model
        model.vgg16.summary()
        model.head.summary()

        # Load base of VGG model
        model.vgg16.load_weights(ARGS.load_vgg, by_name=True)

    elif ARGS.task == 'mobileNet':
        model = mobileNet()
        checkpoint_path = "checkpoints" + os.sep + \
            "mobileNet" + os.sep + timestamp + os.sep
        logs_path = "logs" + os.sep + "mobileNet" + \
            os.sep + timestamp + os.sep
        model(tf.keras.Input(shape=(224, 224, 3)))
        model.summary()

    # Load checkpoints
    if ARGS.load_checkpoint is not None:
        if ARGS.task == '1':
            model.load_weights(ARGS.load_checkpoint, by_name=False)
        else:
            model.head.load_weights(ARGS.load_checkpoint, by_name=False)

    # Make checkpoint directory if needed
    if not ARGS.evaluate and not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Compile model graph
    model.compile(optimizer=model.optimizer,
                  loss=model.loss_fn,
                  metrics=["sparse_categorical_accuracy"])

    if ARGS.evaluate:
        test(model, datasets.test_data)

        # TODO: change the image path to be the image of your choice by changing
        # the lime-image flag when calling run.py to investigate
        # i.e. python run.py --evaluate --lime-image test/Bedroom/image_003.jpg
        path = ARGS.data + os.sep + ARGS.lime_image
        LIME_explainer(model, path, datasets.preprocess_fn)
    else:
        train(model, datasets, checkpoint_path, logs_path, init_epoch)
Code Example #11
    import sys
    sys.path.append("..")

import numpy as np
import torch
import scipy.signal, scipy.interpolate, scipy.ndimage


from param import classes_names, fs, duration_window, duration_overlap, duration_segment, spectro_batch_size
from preprocess import Datasets

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    n_classes = len(classes_names)
    # We will need this for the tests
    DS = Datasets.SignalsDataSet(mode='train', split='balanced', comp_preprocess_first=False)


#%% transform functions

"""In all following functions, the input parameter (data) is, by default,
 a dict of numpy arrays, containing signal names (eg. "Gyr_z") as keys, and 1-dimensional
 arrays as values

Most of this part contains basic visualizations to make sure the preprocessing is correct"""
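
# For illustration, such a dict might look like the following (hypothetical
# array lengths and contents; a real instance holds one window of samples):
#   data = {"Acc_norm": np.zeros(6000, dtype=np.float32),
#           "Gyr_z": np.zeros(6000, dtype=np.float32)}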




class TemporalTransform():
    """  create the base transform to use to each element of the data
Code Example #12
if __name__ == "__main__":
    """
        test the online (comp_preprocess_first=False) preprocessing
        load the Train Set
        apply the preprocess on the first 5 samples

    """

    print(
        '\n\n *** test the online (comp_preprocess_first=False) preprocessing *** \n'
    )

    n_classes = len(classes_names)
    # We will need this for the tests
    DS = Datasets.SignalsDataSet(mode='train',
                                 split='balanced',
                                 comp_preprocess_first=False)

    flag_debug = True
    example_signals = ["Acc_norm", "Gyr_y", "Mag_norm"]
    n_signals = len(example_signals)

    # ---------------------- temporal ----------------------------
    temporal_transform = TemporalTransform(example_signals)
    DS.transform = temporal_transform
    dataloader = torch.utils.data.DataLoader(
        DS, batch_size=5)  # instances will be loaded 5 by 5

    plt.figure()

    #                        axis = time
Code Example #13
def create_dataloaders(split,
                       data_type,
                       fusion_type,
                       signals_list,
                       log_power="missing",
                       out_size="missing",
                       interpolation="missing",
                       comp_preprocess_first=True,
                       use_test=False):
    """
    generate the training, validation, and test sets with the given parameters,
    and returns the corresponding dataloaders

    Parameters
    ----------
    see above for input types and constraints
    - log_power, out_size, and interpolation are only mandatory when
        data_type == "spectrogram", and can be ignored otherwise
    - comp_preprocess_first is True by default
    - use_test (bool): if False, do not generate the test dataloader. Default: False


    Returns
    -------
    train_dataloader, val_dataloader, test_dataloader
        tuple of torch.utils.data.DataLoader objects
        if use_test == False, test_dataloader is replaced with an empty list.

    """
    print("create_dataloaders", signals_list)

    if data_type in ["temporal", "FFT"]:
        if data_type == "temporal":
            transform_fn = transforms.TemporalTransform(
                remove_duplicates(signals_list))

        else:  #data_type == "FFT":
            transform_fn = transforms.FFTTransform(
                remove_duplicates(signals_list))

    elif data_type == "spectrogram":
        transform_fn = transforms.SpectrogramTransform(
            remove_duplicates(signals_list), fs, duration_window,
            duration_overlap, spectro_batch_size, interpolation, log_power,
            out_size)

    if fusion_type in ["time", "freq", "depth"]:
        collate_fn = fusion.ConcatCollate(fusion_type,
                                          list_signals=signals_list)
    elif fusion_type in [
            "probas", "scores", "weighted_probas", "weighted_scores", "GBlend",
            "learn2combine", "decorrelated_classic", "decorrelated_deep"
    ]:
        collate_fn = fusion.separate_sensors_collate
    elif fusion_type in [
            "features", "bottleneck", "attention", "selective_fusion"
    ]:
        collate_fn = fusion.ConcatCollate("depth", list_signals=signals_list)
        # a 'depth' collate can be used for feature concatenation (intermediate fusion)
        # thanks to the 'group' argument of convolutional layers
        # see the documentation of basic_CNN for complete explanations
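        # A minimal sketch of that mechanism (assuming two sensors with C
        # channels each, depth-concatenated into 2*C input channels):
        #     torch.nn.Conv1d(in_channels=2 * C, out_channels=2 * F,
        #                     kernel_size=k, groups=2)
        # convolves each sensor's C channels separately, keeping the two
        # streams independent until the fusion layer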

    train_dataset = Datasets.SignalsDataSet(
        mode='train',
        split=split,
        comp_preprocess_first=comp_preprocess_first,
        transform=transform_fn)
    val_dataset = Datasets.SignalsDataSet(
        mode='val',
        split=split,
        comp_preprocess_first=comp_preprocess_first,
        transform=transform_fn)

    if use_test:
        test_dataset = Datasets.SignalsDataSet(
            mode='test',
            split=split,
            comp_preprocess_first=comp_preprocess_first,
            transform=transform_fn)

    batch_size = 64 if fusion_type != 'decorrelated_deep' else 512  # we need full-rank correlation matrices estimation for deep CCA
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   collate_fn=collate_fn,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=batch_size,
                                                 collate_fn=collate_fn,
                                                 shuffle=use_test)
    if use_test:
        test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                      batch_size=batch_size,
                                                      collate_fn=collate_fn,
                                                      shuffle=True)
    else:
        test_dataloader = []

    return train_dataloader, val_dataloader, test_dataloader
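
A minimal usage sketch (hypothetical call; the parameter values mirror the spectrogram settings used in Code Example #9):

train_dataloader, val_dataloader, test_dataloader = create_dataloaders(
    split='balanced',
    data_type='spectrogram',
    fusion_type='depth',
    signals_list=["Acc_norm", "Gyr_y"],
    log_power=True,
    out_size=(48, 48),
    interpolation='linear',
    comp_preprocess_first=True,
    use_test=False)  # with use_test=False, test_dataloader comes back as an empty list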