def train():
    """
        Function that trains the model
    """
    # Parameters for DataGrnerator constructor
    params = {
        'dim': (15, 16, 3200),
        'batch_size': FLAGS.batch_size,
        'n_classes': 2,
        'n_channels': 1,
        'shuffle': True
    }

    # Get list of filenames for all training data and a dictionary of labels corresponding to each file name
    data, labels = get_data()

    with tf.name_scope('InputBatches'):
        # Generators for training and validation data
        generator_obj = {}
        training_generator = DataGenerator(data['train'], labels, **params)
        validation_generator = DataGenerator(data['valid'], labels, **params)

        # Stores the training and validation batches in a dict
        generator_obj['train'] = training_generator
        generator_obj['valid'] = validation_generator

    # Train the model
    training_fn(generator_obj)
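The DataGenerator consumed by this and several later examples is not shown in the listing; below is a minimal sketch, assuming a keras.utils.Sequence-based generator with the constructor signature used above. The per-sample loading (one .npy file per ID) is an illustrative assumption, not the project's actual implementation.

import numpy as np
from tensorflow import keras

class SketchDataGenerator(keras.utils.Sequence):
    """Minimal Sequence-style batch generator (illustrative sketch only)."""

    def __init__(self, list_IDs, labels, dim, batch_size, n_classes, n_channels, shuffle=True):
        self.list_IDs = list_IDs      # sample identifiers (assumed to be file paths)
        self.labels = labels          # dict mapping ID -> integer class
        self.dim = dim
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # Assemble one batch following the (optionally shuffled) index order
        idxs = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size,), dtype=int)
        for i, idx in enumerate(idxs):
            ID = self.list_IDs[idx]
            X[i] = np.load(ID).reshape(*self.dim, self.n_channels)  # assumed storage format
            y[i] = self.labels[ID]
        return X, keras.utils.to_categorical(y, num_classes=self.n_classes)

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs when requested
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)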
Example #2
    def train_with_datagen(self):
        tensorboard = TensorBoard(log_dir="logs\\{}{}".format(self.model_name, time.time()))
        epoch_path = str(self.model_name) + '-{epoch:02d}.model'
        checkpoint = ModelCheckpoint(epoch_path, period=4)
        # training_boards = list(np.load('known_scores(2.7).npy').item().items())
        # val_boards = list(np.load('test_set(166438)_old.npy').item().items())

        training_boards = list(np.load('train(18000000)_new.npy').item().items())
        val_boards = list(np.load('test(2000000)_new.npy').item().items())

        # training_boards = list(np.load('expanded_train(3600000).npy').item().items())
        # val_boards = list(np.load('expanded_test(400000).npy').item().items())

        # training_boards = list(np.load('train_set(5000000)_FICS.npy').item().items())
        # val_boards = list(np.load('test_set(500000)_FICS.npy').item().items())
        if self.num_classes > 0:
            print('Ternary classification')
            training_generator = DataGenerator(training_boards, batch_size=128, categorical=True)
            validation_generator = DataGenerator(val_boards, batch_size=128, categorical=True)
            self.ternary_classifier()
        else:
            training_generator = DataGenerator(training_boards, batch_size=128)
            validation_generator = DataGenerator(val_boards, batch_size=128)
            self.regression_with_encoder()

        self.model.fit_generator(generator=training_generator,
                                 validation_data=validation_generator,
                                 use_multiprocessing=True,
                                 workers=2,
                                 epochs=32,
                                 verbose=True,
                                 callbacks=[tensorboard, checkpoint])
Example #3
def main():
    """
    Script for evaluating MobileNet with testing dataset (self-driving car data)
    """
    description = "Evaluate MobileNet with test dataset"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument('-model',
                        '--trained_model',
                        type=str, help='MobileNet trained model')
    parser.add_argument("-data_path",
                        "--data",
                        type=str,
                        help="Path to dataset directory")

    user_args = parser.parse_args()
    batch_size = 16
    path = os.path.join(user_args.data, 'test_labels.npy')
    labels = np.load(path)
    data_gen = DataGenerator(user_args.data)
    
    print('Evaluating model on {} samples'.format(labels.shape[0]))
    print('Class distribution:')
    count = np.unique(labels, return_counts=True)
    for i in range(3):
        print('{} : {}'.format(count[0][i], count[1][i]))
    model = load_model(user_args.trained_model)
    scores = model.evaluate_generator(data_gen.npy_generator(usage='test', batch_size=batch_size),
                                      steps=np.ceil(labels.shape[0] / batch_size).astype(int),
                                      verbose=1)
    print("Model performance: {}".format(scores[1]))
def generate_synthetic_data(mode: str, num_rows: int,
                            description_filepath: str,
                            synthetic_data_filepath: str):
    '''
    Generates the synthetic data and saves it to the data/ directory.

    Keyword arguments:
    mode -- what type of synthetic data
    num_rows -- number of rows in the synthetic dataset
    description_filepath -- filepath to the data description
    synthetic_data_filepath -- filepath to where synthetic data written
    '''
    generator = DataGenerator()

    if mode == 'random':
        generator.generate_dataset_in_random_mode(num_rows,
                                                  description_filepath)

    elif mode == 'independent':
        generator.generate_dataset_in_independent_mode(num_rows,
                                                       description_filepath)

    elif mode == 'correlated':
        generator.generate_dataset_in_correlated_attribute_mode(
            num_rows, description_filepath)

    generator.save_synthetic_data(synthetic_data_filepath)
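A hypothetical call to the helper above; the mode string is one of the three branches handled in the function, and the file paths are placeholders.

generate_synthetic_data(mode='random',
                        num_rows=10000,
                        description_filepath='data/description.json',
                        synthetic_data_filepath='data/synthetic_data.csv')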
Example #5
    def main():
        go_file = TraceTxt()
        sce = Scenery()
        gra = Graphics()
        config = Config()
        fac_aps = FactoryAp()
        fac_users = FactoryUser()
        data_generator = DataGenerator()
        arq = go_file.abreArquivo('result.txt')
        arq_supervisioned = go_file.abreArquivo('supervisioned.txt')

        aps = fac_aps.createAps(config)
        users = fac_users.createUsers(config)
        dados = []

        sp = SupervisedData()

        valuesClean = sp.get_values(config, aps)

        #call the data generator, passing the list of users and APs (with their location attributes)
        for i in range(0, config.ciclo):
            dados, users = data_generator.createData(aps, config, users)
            #generate the plot
            sce.gerarGrafico(users, aps, i, config, valuesClean)
            #write the output file
            go_file.escreveArquivoScenery(i, arq, dados)

        go_file.escreveArquivoSupervisionado(arq_supervisioned, valuesClean)

        gra.plotagemPower(valuesClean, config)
        gra.heatMaps(aps, config)
        #gra.heatMapSinr(aps,config)

        go_file.fechaArquivo(arq)
Example #6
def create_train_valid_data(negative_file, positive_files, num_of_classes, kernel_size=None,
                            custom_file_limit=None):
    """Create generators for train an validation"""

    lines_from_all_files = RBNSreader.read_files(positive_files, file_limit=custom_file_limit)
    if USE_SHUFFLED_SEQS:
        lines_from_all_files += create_negative_seqs(lines_from_all_files)
    else:
        lines_from_all_files += RBNSreader.read_file(negative_file, 0, custom_file_limit * 2)

    np.random.shuffle(lines_from_all_files)

    valid_n = int(valid_p * len(lines_from_all_files))

    validation_data = lines_from_all_files[:valid_n]
    train_data = lines_from_all_files[valid_n:]
    print('validation size', len(validation_data))
    print('train size', len(train_data))
    print('kernel size', kernel_size)
    train_gen = DataGenerator(train_data, num_of_classes=num_of_classes, kernel_sizes=kernel_size,
                              max_sample_size=MAX_SAMPLE_SIZE,
                              batch_size=BATCH_SIZE, shuffle=True)

    valid_gen = None
    if valid_n > 0:
        valid_gen = DataGenerator(validation_data, num_of_classes=num_of_classes, kernel_sizes=kernel_size,
                                  max_sample_size=MAX_SAMPLE_SIZE,
                                  batch_size=BATCH_SIZE, shuffle=True)

    return train_gen, valid_gen
Example #7
 def partition_data(self, data_split):
     self.data_split = data_split
     self.partition = {}
     self.partition["training"] = []
     self.partition["validation"] = []
     for ID in self.list_IDs:
         guitarist = int(ID.split("_")[0])
         if guitarist == data_split:
             self.partition["validation"].append(ID)
         else:
             self.partition["training"].append(ID)
             
     self.training_generator = DataGenerator(self.partition['training'], 
                                             data_path=self.data_path, 
                                             batch_size=self.batch_size, 
                                             shuffle=True,
                                             spec_repr=self.spec_repr, 
                                             con_win_size=self.con_win_size)
     
     self.validation_generator = DataGenerator(self.partition['validation'], 
                                             data_path=self.data_path, 
                                             batch_size=len(self.partition['validation']), 
                                             shuffle=False,
                                             spec_repr=self.spec_repr, 
                                             con_win_size=self.con_win_size)
     
     self.split_folder = self.save_folder + str(self.data_split) + "/"
     if not os.path.exists(self.split_folder):
         os.makedirs(self.split_folder)
Example #8
    def get_data(self):
        with tf.name_scope('data'):
            
            # Training Data Preparation

            # DataGenerator to load data and extract patches for training
            datagen = DataGenerator()
            imgs = datagen.load_imgs_from_directory(directory = self.dir_train,filter='decorr*.mat',max_files=None)
            
            # Extracting overlapping training patches 256x256 
            images = datagen.generate_patches_from_list(imgs, shape=(256,256),num_patches_per_img=2000,augment=False)
            
            np.random.shuffle(images)
            
            X_train_noisy = images
            
            
            #####TRAINING
            #Compute mask for training images to exclude them in the loss computation
            indexes = np.where(X_train_noisy > self.clip)
            self.mask_train = np.ones_like(X_train_noisy,dtype=np.bool)
            self.mask_train[indexes] = False
            
            print('Clipping...')
            #Replace high backscatters with the median
            X_train_noisy = np.clip(X_train_noisy, 0, None)
            self.X_train_noisy_clipped = X_train_noisy
            medians = np.median(X_train_noisy,axis=[1,2],keepdims=True)
            self.X_train_noisy_clipped = np.where(self.X_train_noisy_clipped > self.clip,medians, X_train_noisy)
            
            
            print('Normalizing...')
            #Normalization
            self.X_train_noisy_clipped = (self.X_train_noisy_clipped.astype(np.float32))
            self.X_train_noisy_clipped /= self.norm
            
            self.training_data_wrapper=DataWrapper(self.X_train_noisy_clipped,self.mask_train,self.batch_size,shape=(self.img_rows,self.img_cols))
        
            self.training_data_iter=iter_sequence_infinite(self.training_data_wrapper)
            
            
            
            ######TEST IMAGES
            images_test = datagen.load_imgs_from_directory(directory = self.dir_test,filter='decorr_complex_tsx_SLC_0.mat')
            images_test = np.array(images_test)
            #cropping some test images
            images_test = np.array([images_test[0,0,i:i+1000,j:j+1000,:] for i,j in zip([5000,5500,4000,3000,0,1000,5500],[4000,5000,3500,7000,5000,5000,500])])
            self.images_test = images_test
            #Compute mask for test images to be able to place the point targets back into the denoised estimate
            indexes = np.where(self.images_test > self.clip)
            self.mask_test = np.ones_like(self.images_test,dtype=np.bool)
            self.mask_test[indexes] = False
    
            
            #Clipping high backscattering FOR TEST
            self.images_test_clipped = np.clip(self.images_test, 0, None)
            medians = np.median(self.images_test,axis=[1,2],keepdims=True)
            self.images_test_clipped = np.where(self.images_test_clipped > self.clip,medians, self.images_test)
            #Normalization 
            self.images_test_clipped = (self.images_test_clipped.astype(np.float32)) / self.norm
Example #9
def main():
    """
    Script for evaluating MobileNet with testing dataset (self-driving car data)
    """
    description = "Evaluate MobileNet with test dataset"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument('-model',
                        '--trained_model',
                        type=str,
                        help='MobileNet trained model')
    parser.add_argument("-data",
                        "--data",
                        type=str,
                        help="Path to dataset Images")
    parser.add_argument("-labels",
                        "--labels",
                        type=str,
                        help="Path to dataset Labels")

    user_args = parser.parse_args()

    data = np.load(user_args.data)
    labels = np.load(user_args.labels)
    data_gen = DataGenerator('')
    prep_data, prep_labels = data_gen.preprocess_data(data,
                                                      labels,
                                                      balance='equals')
    del data
    print('Evaluating model on {} samples'.format(prep_labels.shape[0]))
    print('Class distribution:')
    for i in range(3):
        print('{} : {}'.format(i, np.sum(prep_labels[:, i]).astype(int)))
    model = load_model(user_args.trained_model)
    evaluate(model, prep_data, prep_labels)
Example #10
def run_models():


	# train_data, train_target, train_id = read_and_normalize_train_data()
	X_train, Y_train = get_images(train_dir,train_label)
	X_valid, Y_valid = get_images(val_dir,val_label)

	# callbacks functions
	checkpoint = ModelCheckpoint(weights_dir, monitor='val_loss', verbose=1, save_best_only=True, mode='min', period=1)
	earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=3, verbose=1, mode='auto')
	csv_logger = CSVLogger(log_dir)
	callbacks = [checkpoint, earlystop, csv_logger]


	model, model_h, model_w  = get_model()

	# Generators
	training_generator = DataGenerator(**params).generate(Y_train, X_train, model_h, model_w )
	validation_generator = DataGenerator(**params).generate(Y_valid, X_valid, model_h, model_w )


	# Train model on dataset
	model.fit_generator(generator=training_generator,
						steps_per_epoch=len(X_train) // batch_size,
						epochs=epochs,
						validation_data=validation_generator,
						validation_steps=len(X_valid) // batch_size,
						verbose=1,
						callbacks=callbacks)
Example #11
def trainDataGenerator(model, batch_size, epochs, x_train, y_train, x_test,
                       y_test):

    # Parameters
    params = {
        'dim': (240, 320, 1),
        'batch_size': 64,
        'n_classes': 6,
        'n_channels': 1,
        'shuffle': True
    }
    dataSetPath = os.path.join(os.path.expanduser('~'),
                               '.keras/datasets/skeleton')
    # Generators
    training_generator = DataGenerator(dataSetPath, x_train, y_train, **params)
    validation_generator = DataGenerator(dataSetPath, x_test, y_test, **params)

    ## TRAINING
    model.fit_generator(generator=training_generator,
                        validation_data=validation_generator,
                        epochs=epochs,
                        verbose=1,
                        shuffle=True,
                        callbacks=[
                            TensorBoard(log_dir='/tmp/skeletonmodel',
                                        histogram_freq=0,
                                        write_graph=True,
                                        write_images=False)
                        ])

    model_path = '../test/skeletonmodel.h5'
    model.save(model_path)
    uploadFileToDrive(model_path)
Example #12
    def __init__(self, m = 20, n = 100, T = 1000, shutN = 10, shutC = 5, offP = 0.6, \
                 MapType = 'clust', ReqType = 'CS2', gaN = 5, upp = False, lP = 0):
        self.m = m  # number of stations
        self.n = n  # number of requests
        self.shutN = shutN  # number of shuttles
        self.T = T  # running time
        self.shutC = shutC  # capacity of shuttle
        self.offP = offP  # ratio of offline requests
        self.gaN = gaN  # number of GA steps
        self.upp = upp  # convert dists to upper bound
        self.lP = lP  # acceptable Late time Policy

        self.MG = MapGenerator(self.m, MapType, self.upp)
        self.RG = RequestGenerator(self.MG, ReqType, self.n, self.T, self.offP)
        self.DG = DataGenerator(self.MG, self.RG, self.shutN, self.gaN,
                                self.lP)
        self.requests = self.RG.requests[:]

        print(self.MG)
        print(self.RG)
        print('Stations : {m} | Requests : {r} | Shuttles : {s}\nTime : {t} | Off proportion : {o} | Capacity : {c}\n'\
              .format(m=self.m, r=self.n, s=self.shutN, t=self.T, o=self.offP, c=self.shutC))
        print('------------------------------------')
        self.rDS = self.RG.rDS()

        self.times = []
Example #13
def generate_data(train_imgs, val_imgs, labels_df, albumentations_train,
                  batch_size, train_dir, resized_dims):
    """
    Args:
        - train_imgs: list of images to be used for training
        - val_imgs: list of images to be used for validation
        - albumentations_train: data augmentation from albumentations package
        - batch_size: batch size for the generator
        - train_dir: directory containing training images
        - resized_dims: dimensions to which images will be resized
    Returns:
        - data_generator_train: generator object for training images
        - data_generator_val: generator object for validation images
    """

    data_generator_train = DataGenerator(train_imgs,
                                         label_vector=labels_df,
                                         dir_imgs=train_dir,
                                         resized_dims=resized_dims,
                                         batch_size=batch_size,
                                         augmentation=albumentations_train,
                                         shuffle=True)

    data_generator_val = DataGenerator(val_imgs,
                                       label_vector=labels_df,
                                       dir_imgs=train_dir,
                                       batch_size=batch_size,
                                       resized_dims=resized_dims,
                                       shuffle=False)

    return (data_generator_train, data_generator_val)
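A sketch of how generate_data might be invoked; the albumentations pipeline, the train_imgs/val_imgs lists, the labels_df frame, and all paths are assumptions for illustration only.

import albumentations as A

# Assumed inputs: train_imgs and val_imgs are lists of image filenames, labels_df holds their labels
albumentations_train = A.Compose([A.HorizontalFlip(p=0.5),
                                  A.RandomBrightnessContrast(p=0.2)])
train_gen, val_gen = generate_data(train_imgs, val_imgs, labels_df,
                                   albumentations_train,
                                   batch_size=32,
                                   train_dir='data/train/',
                                   resized_dims=(224, 224))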
 def __init__(self, caller, gyro, acc, grav, ext):
     super(CaptureThread, self).__init__()
     self.caller = caller
     self.gyro = gyro
     self.acc = acc
     self.grav = grav
     self.gen = DataGenerator()
     self.extra = ext
Example #15
def train(dataset_path, copy_n):
    path_checkpoints = '/content/gdrive/Team Drives/Models/model-improvement-{epoch:02d}-{val_acc:.2f}.hdf5'
    file_train = os.path.join(dataset_path, 'train_data.npy')
    file_valid = os.path.join(dataset_path, 'valid_data.npy')
    x_train = np.load(file_train, mmap_mode='r')
    x_train_samples = x_train.shape[0]

    x_valid = np.load(file_valid, mmap_mode='r')
    x_valid_samples = x_valid.shape[0]

    del x_train, x_valid

    batch_size = 64
    #epochs = 15
    epochs = 25
    base_model = pretrained.mobilenet.MobileNet(weights='imagenet')
    model = pretrained.mobilenet.MobileNet(weights=None, classes=3)

    # global frozen_weights
    # frozen_weights = base_model.get_layer('conv_pw_1').get_weights()
    keep(model, base_model)
    frozen(model, base_model, n=copy_n)

    del base_model

    sgd = optimizers.SGD(lr=0.01, decay=0.0005, momentum=0.9)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    stopper = EarlyStopping(monitor='val_acc',
                            min_delta=0.0001,
                            patience=3,
                            verbose=1)

    checkpoint = ModelCheckpoint(path_checkpoints,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    # custom_callback = CustomCallback()

    data_gen = DataGenerator(dataset_path)

    model.fit_generator(
        data_gen.npy_generator(usage='train', batch_size=batch_size),
        steps_per_epoch=np.ceil(x_train_samples / batch_size).astype(int),
        validation_data=data_gen.npy_generator(usage='valid',
                                               batch_size=batch_size),
        validation_steps=np.ceil(x_valid_samples / batch_size).astype(int),
        callbacks=[stopper, checkpoint],
        epochs=epochs,
        verbose=1)

    model.save(
        "/content/gdrive/Team Drives/Models/A{}B+-mobilenet.h5".format(copy_n))
    print("Saved model to disk")
def continue_model(infile,
                outdir,
                initial,
                data_amount=1000000,
                data_offset=0,
                validation_amount=100000,
                validation_offset=1000000,
                batch_size=8,
                slide = 5,
                epochs=15,
                verbosity = 1):
    # Work around numpy's allow_pickle default so dictionary archives can still be loaded
    np_load_old = np.load
    np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
    mp.set_start_method("spawn",force=True)
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    ###Tensorflow session
    #config = tf.compat.v1.ConfigProto()
    #config.gpu_options.allow_growth = True
    #tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
    
    outdir = outdir % os.environ["SLURM_JOBID"]
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    model = keras.models.load_model(initial)
    if len(model.layers[0].output_shape[0])==4:
        depth = 1
    else:
        depth = model.layers[0].output_shape[0][1]
    multiple_inputs=False
    if len(model._input_layers) > 1:
        multiple_inputs=True
    #Initialize generators
    data_gen = DataGenerator(infile,
                             data_amount=data_amount,
                             batch_size=batch_size,
                             frames_per_sample=depth,
                             offset=data_offset,
                             sliding_window=slide,
                             labels_structured=multiple_inputs)
    validation_gen = DataGenerator(infile,
                                   data_amount=validation_amount,
                                   batch_size=batch_size,
                                   frames_per_sample=depth,
                                   offset=validation_offset,
                                   sliding_window=slide,
                                   labels_structured=multiple_inputs)
    #os.remove(initial)
    callbacks = [keras.callbacks.ModelCheckpoint(filepath= (outdir+'model_progress_{epoch:02d}.h5')),
                 Logger.JupyterProgbarLogger(count_mode='steps',notebook=False)]
    history = model.fit_generator(generator=data_gen,
                    validation_data=validation_gen,
                    epochs=epochs,
                    verbose=verbosity,
                    use_multiprocessing=True,
                    workers=16,
                    callbacks=callbacks)
    print(model.summary())
 def __init__(self, caller, gyro, acc, grav, classifier, ext=None):
     super(ProcessThread, self).__init__()
     self.caller = caller
     self.gyro = gyro
     self.acc = acc
     self.grav = grav
     self.gen = DataGenerator()
     self.extra = ext
     self.classifier = classifier
Example #18
    def __create_generator(self):
        train_start_seq = self.mysql_client.train_start_seq
        train_end_seq = self.mysql_client.train_end_seq

        validation_start_seq = self.mysql_client.validation_start_seq
        validation_end_seq = self.mysql_client.validation_end_seq

        self.train_generator = DataGenerator(train_start_seq, train_end_seq)
        self.validation_generator = DataGenerator(validation_start_seq, validation_end_seq)
Example #19
def trainTheModelPart1(trData, trLabels, testData, testLabels, cv, model):
    '''
    the network or networks for part 1 are trained here 
    '''

    if CONST_p1.part1Debuging:  # show log if debugging
        from utils.logGeneration import findHowManyImagesInEachClass
        findHowManyImagesInEachClass(trData, "train")
        findHowManyImagesInEachClass(testData, "test")

    optimizer = CONST_p1.optimizer
    loss = CONST_p1.loss

    trWeights = [
        CONST_p1.part1weightDict[trLabels[i]] for i in trLabels
    ]  #creating array of weights for the data since the data is highly unbalanced

    #create data generator
    if CONST_p1.part1usePretrainedNetwork:
        params = CONST_p1.data_generator_params_pretrainedNet
    else:
        params = CONST_p1.data_generator_params
    training_generator = DataGenerator(trData, trLabels, **params)
    test_generator = DataGenerator(testData, testLabels, **params)

    # create model
    if os.path.exists(CONST_p1.part1trainedModelSavePath
                      ):  #check if the model already exists
        model = keras.models.load_model(CONST_p1.part1trainedModelSavePath)
    else:
        if CONST_p1.part1usePretrainedNetwork:
            model = CONST_p1.model_pretrainedNet
        else:
            model = CONST_p1.model

    #compile
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    #fit
    history = model.fit_generator(
        generator=training_generator,
        validation_data=test_generator,
        class_weight=trWeights,
        **CONST_p1.training_parameters
        #callbacks=[keras.callbacks.EarlyStopping(monitor= 'val_acc',patience=3,mode = "max",baseline=80)]#if early stopping used
    )

    #save and plot
    model.save(CONST_p1.part1trainedModelSavePath)
    plotAccAndLoss(history, cv)

    #clean
    del (history)
    model.reset_states()
    del (model)
    tf.reset_default_graph()
    keras.backend.clear_session()
 def _done_convergence(self, area: Area, t_end: int):
     print('\n*********** Convergence is Done ! t = {} ***********'.format(
         t_end))
     print('Targets Cells Probabilities: {}'.format(
         [self.p_S['t'][location] for location in area.targets_locations]))
     DataGenerator.tabulate_matrix(self.p_S['t'])
     # self._plot_target_searching(area=area, t=t_end)
     # self._plot_metrics(t=t_end)
     save_pickle_object(obj_name='p_S_converged', obj=self.p_S['t'])
Example #21
def train(dataset_path):
    path_checkpoints = '/content/gdrive/Team Drives/Models/best_model.hdf5'
    # path_checkpoints = '/content/gdrive/Team Drives/Models/model-improvement-{epoch:02d}-{val_acc:.2f}.hdf5'
    file_train = os.path.join(dataset_path, 'train_labels.npy')
    file_valid = os.path.join(dataset_path, 'valid_labels.npy')
    x_train = np.load(file_train, mmap_mode='r')
    x_train_samples = x_train.shape[0]

    x_valid = np.load(file_valid, mmap_mode='r')
    x_valid_samples = x_valid.shape[0]

    del x_train, x_valid

    batch_size = 64
    epochs = 25

    # Model
    inputs = Input(shape=(10800,))  # working with an already flattened image

    x = Dense(1333, activation='relu')(inputs)
    x = Dense(200, activation='relu')(x)
    predictions = Dense(3, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=predictions)

    sgd = optimizers.SGD(lr=0.01, decay=0.0005, momentum=0.9)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    print("Total number of parameters: {}".format(trainable_count))

    stopper = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=3, verbose=1)

    checkpoint = ModelCheckpoint(path_checkpoints,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    custom_callback = CustomCallback()

    global data_gen
    data_gen = DataGenerator(dataset_path)


    model.fit_generator(data_gen.npy_generator(usage='train', batch_size=batch_size),
                        steps_per_epoch=np.ceil(
                            x_train_samples / batch_size).astype(int),
                        validation_data=data_gen.npy_generator(
                            usage='valid', batch_size=batch_size),
                        validation_steps=np.ceil(
                            x_valid_samples / batch_size).astype(int),
                        callbacks=[stopper, checkpoint, custom_callback],
                        epochs=epochs,
                        verbose=1)
Example #22
def test_generator(args):
    generator = DataGenerator(args.data_root, "train", 90, (0.7, 1.2), (1, 3),
                              0.3, 0.3, 5, 32, (224, 224), 3, True, True, True)

    n_samples = 100
    i = 0
    for img in generator.generate():
        i += 1
        if i > n_samples:
            break
Example #23
def main():
    NewDataset = DataGenerator("./dataset/comments_cleaned_1st_phase.csv",
                               "./dataset/digi_cleaned_phase2.csv"
                               )  # This line doesn't have a valid save_path
    start_time = time.time()
    NewDataset.Data_Import()
    NewDataset.Data_cleaner()
    NewDataset.Save_Dataset()
    print("============================")
    print("Finished in %.2f s" % (time.time() - start_time))
Example #24
def main():
    dataGen = DataGenerator(iterationNum=flag.batch_size)
    print(dataGen.iterationNum)
    random_image_reconstruction = dataGen.get_random_images(10)
    print(random_image_reconstruction.shape)
    model = B_VAE([None, dataGen.image_shape[0], dataGen.image_shape[1]],
                  gamma=flag.gamma,
                  capacity_limit=flag.capacity_limit,
                  capacity_change_duration=flag.capacity_duration)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = load_checkpoints(sess)
    if flag.training:
        train(sess,model,dataGen, saver)
Example #25
def main():
    params = {
        'dim': (8, 8),
        'batch_size': 256,
        'n_classes': 3,
        'n_channels': 7,
        'shuffle': True
    }

    IDs, num_IDs = setIDs('IDs_shuf.csv')

    pivot = int(np.floor(num_IDs * 0.8))

    partitions = {'train': IDs[:pivot], 'validation': IDs[pivot:]}

    train_generator = DataGenerator(partitions['train'], **params)
    validation_generator = DataGenerator(partitions['validation'], **params)

    #X, y = train_generator._DataGenerator__data_generation(partitions['train'][0:params['batch_size']])

    #print(X[0])

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(4, 4),
               data_format='channels_first',
               batch_size=params['batch_size'],
               batch_input_shape=(params['batch_size'], 7, 8, 8)))
    model.add(Activation('relu'))
    model.add(Conv2D(64, kernel_size=(2, 2), data_format='channels_first'))
    model.add(Flatten())
    model.add(Dense(3))
    model.add(Activation('softmax'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())

    filepath = "/data/models"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 mode='max')

    model.fit_generator(generator=train_generator,
                        validation_data=validation_generator,
                        use_multiprocessing=True,
                        workers=16,
                        epochs=10,
                        verbose=1,
                        callbacks=[checkpoint])
Example #26
def main():
    gains = [40, 50, 60]
    tx_beams = np.arange(0, 24)
    num_samples_tot_gain_tx_beam = 10000

    # Order is gain *

    indexes = np.arange(
        0,
        num_samples_tot_gain_tx_beam * len(tx_beams) * len(gains)
    )
    batch_size = 32
    data_path = '/media/michele/rx-12-tx-tm-0-rx-tm-1.h5'

    num_blocks_per_frame = 15
    how_many_blocks_per_frame = 1
    num_samples_per_block = 2048
    num_tx_beams = len(tx_beams)
    input_size = 1024
    
    dg = DataGenerator(
        indexes,
        batch_size,
        data_path,
        num_tx_beams,
        num_blocks_per_frame,
        input_size,
        num_samples_per_block,
        how_many_blocks_per_frame,
        shuffle=False,
        is_2d=False
    )

    batch_gain_tx_beam = num_samples_tot_gain_tx_beam / batch_size


    # for [i_g, val_g] in enumerate(gains):
    #     print("Gain: " + str(val_g))
    #     for [i_t, val_t] in enumerate(tx_beams):
    #         print("Beam idx: " + str(val_t))
    #         batch_index = (i_g * len(tx_beams) * batch_gain_tx_beam) + i_t * batch_gain_tx_beam
    #         print("Batch idx: " + str(batch_index))
    #         [batch, batch_y] = dg.__getitem__(batch_index)
    #         print("tx_beam %d y % s" % (val_t, batch_y[0]))
    #         # print(batch_y[0])


    for i in range(len(dg)):
        print("Batch idx: " + str(i))
        [batch, batch_y] = dg[i]
        print("tx_beam %s %s y %s %s" % (batch[0][0], batch[-1][0], batch_y[0], batch_y[-1]))
        print("batch_x_size: %s, batch_y_size: %s" % (str(batch.shape), str(batch_y.shape)))
def bayesian_updating_iteration(params,
                                until_convergence: bool = True,
                                sensitivity_analysis: bool = False):
    start_time = dt.now()
    DataGenerator.param_validation(params=params)
    targets_locations = eval(params['TARGET_LOCATIONS'])

    if targets_locations is None and params.getint('NUM_TARGETS'):
        targets_locations = Area.generate_targets(
            num_targets=params.getint('NUM_TARGETS'))

    area = Area(num_cells_axis=params.getint('N'),
                t_interval=params.getint('T_INTERVAL'),
                pta=params.getfloat('pta'),
                alpha=params.getfloat('alpha'),
                targets_locations=targets_locations)

    agent = Agent(current_location=eval(params['AGENT_POSITION']),
                  lambda_strength=params.getint('LAMBDA_SENSOR'),
                  p_S=Agent.get_p_S_from_initial_prior(
                      prior=params.getfloat('INITIAL_PRIOR_P'), area=area),
                  entropy_updates=[],
                  information_gain_updates=[],
                  converge_iter=200 - 1,
                  p_S_history=[])
    """
    2 Running modes: until convergence (all the targets identified and not False Positives) or until infinity
    """
    if not sensitivity_analysis:
        agent.bayesian_update(area=area,
                              until_convergence=until_convergence,
                              verbose=True,
                              stop_after_iteration=200)
        print('Bayesian Updating Done in {}'.format(dt.now() - start_time))

    else:
        target_prob = agent.get_prob_history(targets_locations[2])
        # non_target_loc = random.choice([loc for loc in zip(range(params.getint('N')), range(params.getint('N'))) if loc not in targets_locations])
        # Randomly selected non targets
        list_of_loc = [(9, 10), (7, 15), (1, 16)]
        agent_pos = eval(params['AGENT_POSITION'])
        non_target_prob = {}
        for loc in list_of_loc:
            dist = np.round(
                np.sqrt(
                    np.power(loc[0] - agent_pos[0], 2) +
                    np.power(loc[1] - agent_pos[1], 2)))
            non_target_prob[dist] = agent.get_prob_history(loc)

        ent_upd, info_gain_upd, conv_iter = agent.get_metrics()

        return ent_upd, info_gain_upd, conv_iter, target_prob, non_target_prob
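The params object used above behaves like a configparser section (plain string lookups plus getint/getfloat, with tuple and list values recovered via eval). A minimal sketch of such a configuration, with a placeholder section name and values that are assumptions only, might look like this:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[SIMULATION]
N = 20
T_INTERVAL = 1
pta = 0.9
alpha = 0.05
LAMBDA_SENSOR = 5
INITIAL_PRIOR_P = 0.5
AGENT_POSITION = (0, 0)
TARGET_LOCATIONS = [(3, 4), (10, 12), (15, 2)]
NUM_TARGETS = 3
""")
params = cfg['SIMULATION']  # passed to bayesian_updating_iteration(params, ...)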
Example #28
 def __BuildGenerators(self):
     dataTrain, labelsTrain, dataTest, labelsTest = TrainTestSplit(
         self.data, self.labels, .95)
     self.train_generator = DataGenerator(dataTrain, labelsTrain,
                                          self.batch_size, self.vocab_size,
                                          self.max_input_len,
                                          self.max_output_len,
                                          self.token_index)
     self.valid_generator = DataGenerator(dataTest, labelsTest,
                                          self.batch_size, self.vocab_size,
                                          self.max_input_len,
                                          self.max_output_len,
                                          self.token_index)
Example #29
def train_model(num_classes=5,
                num_samples=1,
                meta_batch_size=16,
                random_seed=1234,
                itr_cnt=25000,
                cell_count=128,
                lr=0.001,
                model=None,
                logDir='./logs',
                modelDir='./models',
                shuffle=True):

    random.seed(random_seed)
    np.random.seed(random_seed)
    tf.random.set_seed(random_seed)

    data_generator = DataGenerator(num_classes, num_samples + 1)

    o = model
    if o is None:
        o = MANN2(num_classes, num_samples + 1, cell_count=cell_count)
    optim = tf.keras.optimizers.Adam(learning_rate=lr)

    modelName = f'K={num_samples}&N={num_classes}&CellCount={cell_count}&BS={meta_batch_size}'
    writer = tf.summary.create_file_writer(f'{logDir}/{modelName}/')

    with writer.as_default():
        for step in range(itr_cnt):
            i, l = data_generator.sample_batch('train',
                                               meta_batch_size,
                                               shuffle=shuffle)
            _, ls = train_step(i, l, o, optim)

            if (step + 1) % 100 == 0:
                print("*" * 5 + "Iter " + str(step + 1) + "*" * 5)
                i, l = data_generator.sample_batch('test', 100)
                pred, tls = train_step(i, l, o, optim, eval=True)
                pred = tf.reshape(
                    pred, [-1, num_samples + 1, num_classes, num_classes])
                pred = tf.math.argmax(pred[:, -1, :, :], axis=2)
                l = tf.math.argmax(l[:, -1, :, :], axis=2)
                test_accuracy = tf.reduce_mean(
                    tf.cast(tf.math.equal(pred, l), tf.float32)).numpy()

                tf.summary.scalar('Test Accuracy', test_accuracy, step=step)
                tf.summary.scalar('Training Loss', ls.numpy(), step=step)
                tf.summary.scalar('Test Loss', tls.numpy(), step=step)
                writer.flush()
                o.save_weights(f'{modelDir}/{modelName}')
                print("Test Accuracy", test_accuracy, "Train Loss:",
                      ls.numpy(), "Test Loss:", tls.numpy())
Example #30
    def test(self):
        # Generate training set
        test_set = self.Data.unseen_pairs

        test_gen = DataGenerator(pairs=test_set, batch_size=self.param_grid['batch_size'],
                                 nr_files=len(self.Data.all_files), nr_tests=len(self.Data.all_tests),
                                 negative_ratio=self.param_grid['negative_ratio'])

        X, y = next(test_gen.data_generation(test_set))
        pred = self.model.predict(X)

        pred[pred < 0.5] = 0
        pred[pred >= 0.5] = 1
        return y, pred
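The y / pred pair returned above (hard 0/1 labels after thresholding at 0.5) can be scored directly; a hedged sketch using scikit-learn, which is not part of the source:

from sklearn.metrics import accuracy_score, f1_score

y_true, y_pred = evaluator.test()  # 'evaluator' is a hypothetical instance of the surrounding class
print('accuracy: {:.3f}'.format(accuracy_score(y_true, y_pred)))
print('f1:       {:.3f}'.format(f1_score(y_true, y_pred)))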
Example #31
    def generate_graph(self):
        self.params = {
            'graph'           : None,
            'start_idx'       : int(self.start_airport.text()),
            'end_idx'         : int(self.destination_airport.text()),
            'max_flights'     : int(self.max_flights.text()),
            'cost_weight'     : int(self.cost_weight.text()),
            'time_weight'     : int(self.time_weight.text()),
            'pop_size'        : int(self.pop_size.text()),
            'generations'     : int(self.generation.text()),
            'mutation_rate'   : float(self.mutation_rate.text()),
            'tournament_size' : int(self.tournament_size.text()),
            'elitism'         : bool(self.elitism.currentText()),  # note: bool() of a non-empty string is always True
            'dest_min'        : int(self.dest_min.text()),
            'dest_max'        : int(self.dest_max.text()),
            'max_flights'     : 4,  # note: duplicate key; this value overrides the one read from the UI above
        }
        data = DataGenerator()
        DataGenerator.DESTINATIONS_MIN = self.params['dest_min']
        DataGenerator.DESTINATIONS_MAX = self.params['dest_max']

        # if input_graph_file is not None:
        #     data.load_saved_graph(input_graph_file)
        #
        # else:
        # TODO: number of airports
        data.load_new_data(10)
        data.create_graph()

        # if graph_save_file is not None:
        #     data.save_graph(graph_save_file)

        testsuite_airports = data.get_airports()
        testsuite_graph = data.get_graph()

        self.graph = GraphManager(self.params['max_flights'])
        self.graph.set_graph(testsuite_graph, testsuite_airports)

        airports_parser = Testsuite_airports_parser(testsuite_airports)
Example #32
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
    
def take_integer_from_user(message):
    inp = raw_input(message)
    while True:
        try: 
            int(inp)
            break
        except ValueError: 
            inp = raw_input('You must specify a number: ')
    return int(inp)

fig_swiss = plt.figure()
dg = DataGenerator()
inp = take_integer_from_user('Press 1 for photos, 2 for Swiss Roll: ')
if inp == 1:
    a = dg.generateFacesPoints("C:\\Users\\Elrassam\\Desktop\\Faces\\s")
    numOfPoints = 400
elif inp == 2:   
    numOfPoints = take_integer_from_user('Enter the number of points: ')
    a=np.zeros((numOfPoints,3))
    ins = open( "1500.txt", "r" )
    i = 0
    for line in ins:
        j = 0
        nums = line.split(' ')
        for nu in nums:
            a[i,j] = nu
            j += 1
Example #33
import random, time
import numpy as np

# A small test program for the data generator
from DataGenerator import DataGenerator
from GridMessageProtocol import *

dg=DataGenerator(n=1, m=8, randseed=1024)
#print "0:0"
#print "-1:1"
while True:
    x=dg.generate_measurement(t=time.time(), id_range=(0,8))
    alist=[item for sublist in x for item in sublist]
    print "0:%f" %alist[5]
Example #34

# Customize output
####################################
CLASSNAMES = ['A','B','C','D']
NUMBER_OF_CLASSES = len(CLASSNAMES)
NUMBER_OF_DIMENSIONS = 10
NUMBER_OF_SAMPLES_PER_CLASS = 2000
####################################


if __name__ == '__main__':

	# GENERATE DEPENDENT DATA
		
	generate = DataGenerator()
	samples = []
	trees = {}
	for c in CLASSNAMES:
		tree = generate.generateTree(['X'+`i` for i in range(NUMBER_OF_DIMENSIONS)])
		data = generate.generateDependentData(NUMBER_OF_SAMPLES_PER_CLASS, c, tree)
		sample = data['samples']
		for s in sample:
			s.append(data['name'])
			samples.append(s)

		trees[c] = tree

	saveJSONtoFile("flare.json", trees['A'].getJSONFormat())

	testingSet = []
Example #35
if int(raw_input("Jesli chcesz domysle wartosci -1, jesli nie -2\n")) == 2:
    per = float(raw_input("podaj liczbe okresow\n"))
    fr = float(raw_input("podaj frame_rate\n"))
    fq = float(raw_input("podaj czestotliwosc\n"))
    ns = float(raw_input("podaj poziom szumu\n"))
    am = float(raw_input("podaj amplitude\n"))
    off = float(raw_input("podaj offset\n"))
else:
    per = 4
    fr = 44100
    fq = 200
    ns = 100
    am = 1
    off = 0
generator = DataGenerator()
generator.change_number_of_periods(per)
generator.change_framerate(fr)
generator.change_frequency(fq)
generator.change_noise(ns)
generator.change_amplitude(am)
generator.change_offset(off)


def my_sin(x, freq, amplitude, phase, offset):
    # must be a list comprehension, otherwise multiplying a list by a float raises an error
    return np.sin([i * freq + phase for i in x]) * amplitude + offset
p0 = sy.array([1,1,1,1])


def func(x, a, b, c):
Example #36
        T = times.shape[0]
        times.shape = (T,1)
        n = state.shape[0]
        x0 = state[:,0].T
        xt = state[:,1].T
        x0.shape = ( 1,n )
        xt.shape = ( 1,n )
        x = x0.repeat( T, 0 ) + sp.dot( times-t0, xt )
        return x
        
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    m = 8
    n = 8
    
    dg = DataGenerator(n=n,m=m)
    
    se = StateEstimator(dg.EstimationMatrix)
    
    time_offset = time.time()
    # time_offset = 0.0
    times = sp.arange(time_offset+0.0, time_offset+1.7, 1.0/3.0).reshape(-1,1)
    T = times.shape[0]
    print "==times.shape[0]=="
    print times.shape[0]
    # state = sp.hstack( [ (sp.pi-times)*sp.cos(2*times), (1+times)*sp.sin(3*times)] )
    state = sp.vstack( [ dg.generate_state(t).T for t in times ] )
    print "==state.shape=="
    print state.shape
    print "===state==="
    print state