Example #1
def train(sp=-1):
    model = KerasModel()
    train = DataGenerator('train')
    test = DataGenerator('test')
    
    if sp != -1:
        chkp = '.' + os.sep + 'models' + os.sep + 'save_' + str(sp) + '.h5'
        model.load_weights(chkp)
        print('start point: %d'%sp)
    print("开始训练:")
  
    model.fit_generator(    
        generator=train,
        # steps_per_epoch: total number of steps drawn from the generator before one epoch is declared finished and the next begins.
        # It should normally equal the number of samples in the dataset divided by the batch size; for a Sequence it is optional, and len(generator) is used when it is not specified.
        # In principle the value should follow that rule, but because our dataset is small we run more steps than that.
        # During testing I used 14, which follows the theory exactly; if the current value does not run properly, change it back to 14 and increase the number of epochs.
        steps_per_epoch=3000,
        # epochs: number of epochs to run (epochs_time is assumed to be defined elsewhere in the source module)
        epochs=epochs_time,
        verbose=1,
        # validation_data was missing from the original call even though validation_steps is set;
        # the test generator created above is the likely intended source (assumption)
        validation_data=test,
        # validation_steps: number of validation batches drawn at the end of each epoch
        validation_steps=100
        # For more fit_generator parameters see: https://blog.csdn.net/qq_32951799/article/details/82918098
    )
    # Save the model weights once training finishes (the filename records the number of epochs)
    model.save_weights('.' + os.sep + 'models' + os.sep + 'save_' +str(epochs_time)+ '.h5' )
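Every example on this page consumes a project-specific DataGenerator that is defined elsewhere. For reference, here is a minimal sketch of what such a generator can look like when built on keras.utils.Sequence; the class name, shapes, and on-disk layout below are assumptions for illustration, not any of the original implementations.

import numpy as np
from tensorflow import keras


class MinimalDataGenerator(keras.utils.Sequence):
    """Hypothetical Sequence-based batch generator (not the project's actual class)."""

    def __init__(self, ids, labels, batch_size=32, dim=(6000, 12), num_classes=5,
                 shuffle=True):
        self.ids = list(ids)
        self.labels = labels
        self.batch_size = batch_size
        self.dim = dim
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # fit_generator falls back to this value when steps_per_epoch is not given.
        return int(np.floor(len(self.ids) / self.batch_size))

    def __getitem__(self, index):
        batch_ids = self.index_order[index * self.batch_size:(index + 1) * self.batch_size]
        X = np.empty((self.batch_size, *self.dim))
        y = np.empty((self.batch_size,), dtype=int)
        for i, sample_id in enumerate(batch_ids):
            X[i] = np.load('data/%s.npy' % sample_id)  # assumed on-disk layout
            y[i] = self.labels[sample_id]
        return X, keras.utils.to_categorical(y, num_classes=self.num_classes)

    def on_epoch_end(self):
        self.index_order = list(self.ids)
        if self.shuffle:
            np.random.shuffle(self.index_order)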
Example #2
def generate_dataset(dataset_path, cols_size):
    for size in cols_size:
        block_cnt = 20
        block_len = 30
        dgen = DataGenerator(block_cnt, block_len, 9999, 0)
        random.seed(1)
        with open(dataset_path + "/" + str(size) + ".txt", 'w') as f:
            for i in range(1, block_cnt + 1):
                block_to_use = i
                for col in dgen.generate_columns(size // 20, block_to_use):
                    f.write(str(col) + '\n')
Example #3
    def run(self):
        clean_data = CleanData()
        clean_data.run()

        calc_corr = CalcCorrMatrix()
        calc_corr.run()

        data_generator = DataGenerator()
        train_feature, train_label, test_feature, test_label = data_generator.run()

        return train_feature, train_label, test_feature, test_label
Example #4
    def __init__(self, env, cur_length, full_dataset_length, max_length,
                 num_samples, epsilon):
        self.env = env
        self.cur_length = cur_length
        self.max_length = max_length
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.full_dataset_length = full_dataset_length

        self.generator = DataGenerator(env, epsilon)
        self.states_full, self.actions, _, _ = self.generator.calc_data_full(
            self.full_dataset_length)
        self.reinitialize()
    def __init__(self, env, cur_length, full_dataset_length, max_length,
                 num_samples, epsilon):
        self.env = env
        self.cur_length = cur_length
        self.max_length = max_length
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.full_dataset_length = full_dataset_length

        self.generator = DataGenerator(env, epsilon)
        self.states_full, self.actions, self.next_states_full,\
            self.masks_full = self.generator.calc_data_full(self.full_dataset_length)
        n = self.cur_length - self.full_dataset_length
        if n > 0:
            self.states_rand, self.next_states_rand, self.masks_rand\
                = self.generator.calc_data_rand(self.states_full[-1].view(-1, 2, 2, 2), self.actions, n)
def main(modelName):
    INPUT_SHAPE = (6000, 12)
    NUM_CLASSES = 5
    BATCH_SIZE = 16
    EPOCHS = 80

    labels = load_obj('sampleLabels')

    params = {
        'dim': INPUT_SHAPE[0],
        'batchSize': BATCH_SIZE,
        'numClasses': NUM_CLASSES,
        'numChannels': INPUT_SHAPE[1],
        'shuffle': True
    }

    callbacksList = [
        keras.callbacks.EarlyStopping(monitor='val_loss',
                                      mode='min',
                                      verbose=1,
                                      patience=25),
        keras.callbacks.ModelCheckpoint('E1/' + modelName + '_wts_b.hdf5',
                                        monitor='val_acc',
                                        verbose=1,
                                        save_best_only=True,
                                        mode='max')
    ]

    trainIDs = load_obj('trainIDs2')
    valIDs = load_obj('valIDs2')
    testIDs = load_obj('testIDs2')

    trainingGen = DataGenerator(trainIDs, labels, **params)
    validationGen = DataGenerator(valIDs, labels, **params)
    testingGen = DataGenerator(testIDs, labels, **params)

    keras.backend.clear_session()

    model = Model.factory(modelName, INPUT_SHAPE, NUM_CLASSES)

    model.fit(BATCH_SIZE, EPOCHS, trainingGen, validationGen, callbacksList)

    model.load_weights('E1/' + modelName + '_wts_b.hdf5')

    model.test(testingGen)
Example #7
def train(sp=-1):
    model = KerasModel()

    train = DataGenerator('train')
    test = DataGenerator('test')

    if sp != -1:
        chkp = '.' + os.sep + 'models' + os.sep + 'save_' + str(sp) + '.h5'
        model.load_weights(chkp)
    print('start point: %d' % sp)

    for i in range(sp + 1, 100):
        model.fit_generator(generator=train,
                            samples_per_epoch=3000,
                            nb_epoch=1,
                            validation_data=test,
                            nb_val_samples=100)
        model.save_weights('.' + os.sep + 'models' + os.sep + 'save_' +
                           str(i) + '.h5')
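This variant uses the legacy Keras 1 argument names (samples_per_epoch, nb_epoch, nb_val_samples). Under Keras 2 the same loop would read roughly as the sketch below; note that steps_per_epoch counts batches rather than samples, so the literal numbers are assumptions that would need adjusting to the generator's batch size.

for i in range(sp + 1, 100):
    model.fit_generator(generator=train,
                        steps_per_epoch=3000,    # Keras 2: batches per epoch, not samples
                        epochs=1,
                        validation_data=test,
                        validation_steps=100)    # Keras 2: validation batches, not samples
    model.save_weights('.' + os.sep + 'models' + os.sep + 'save_' + str(i) + '.h5')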
class RandomStateDataset(data.Dataset):
    def __init__(self, env, cur_length, full_dataset_length, max_length,
                 num_samples, epsilon):
        self.env = env
        self.cur_length = cur_length
        self.max_length = max_length
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.full_dataset_length = full_dataset_length

        self.generator = DataGenerator(env, epsilon)
        self.states_full, self.actions, self.next_states_full,\
            self.masks_full = self.generator.calc_data_full(self.full_dataset_length)
        n = self.cur_length - self.full_dataset_length
        if n > 0:
            self.states_rand, self.next_states_rand, self.masks_rand\
                = self.generator.calc_data_rand(self.states_full[-1].view(-1, 2, 2, 2), self.actions, n)

    def reinitialize(self):
        self.states_full, self.actions, self.next_states_full,\
            self.masks_full = self.generator.calc_data_full(self.full_dataset_length)
        n = self.cur_length - self.full_dataset_length
        if n > 0:
            self.states_rand, self.next_states_rand, self.masks_rand\
                = self.generator.calc_data_rand(self.states_full[-1].view(-1, 2, 2, 2), self.actions, n)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, _):
        length = torch.randint(0, self.cur_length, ())
        if length < self.full_dataset_length:
            idx = torch.randint(0, len(self.states_full[length]), ())
            state = self.states_full[length][idx]
            next_states = self.next_states_full[length][idx]
            mask = self.masks_full[length][idx]
        else:
            length = length - self.full_dataset_length
            idx = torch.randint(0, len(self.states_rand[length]), ())
            state = self.states_rand[length][idx]
            next_states = self.next_states_rand[length][idx]
            mask = self.masks_rand[length][idx]
        return {'state': state, 'next_states': next_states, 'mask': mask}
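RandomStateDataset is a plain torch.utils.data.Dataset, so it is normally consumed through a DataLoader. A minimal usage sketch follows; env and the numeric arguments are placeholders rather than values from the source project.

from torch.utils.data import DataLoader

# env is whatever environment object the project passes to DataGenerator (placeholder here).
dataset = RandomStateDataset(env, cur_length=8, full_dataset_length=4,
                             max_length=30, num_samples=10000, epsilon=0.1)
loader = DataLoader(dataset, batch_size=128, num_workers=2)

for batch in loader:
    # Each batch is a dict of stacked tensors, as returned by __getitem__.
    states, next_states, masks = batch['state'], batch['next_states'], batch['mask']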
Example #9
    def run(self):
        if self.preprocess == 1:
            print "cleaning data...\n"
            clean_data = CleanData(self.file_path)
            clean_data.run()
            print "cleaning data successfully\n"

        print "calculating correlation matrix...\n"
        calc_corr = CalcCorrMatrix()
        corr_matrix = calc_corr.run()
        print "calculatE successfully\n"

        print "generating dataset...\n"
        data_generator = DataGenerator('../data/', self.code, self.ratio,
                                       corr_matrix, self.train_set_choice)
        train_feature, train_label, test_feature, test_label = data_generator.run()
        print "generate successfully\n"

        return train_feature, train_label, test_feature, test_label, corr_matrix
Example #10
    def compute(self, text_tokenizer, inv_map, k_means, embed, num_words,
                comb):
        input_1 = Input(shape=(num_words, ))
        #process_1 = Dense(1024)(input_1)
        #process_2 = Activation('tanh')(process_1)
        process_3 = Dense(1)(input_1)
        output_1 = Activation('tanh')(process_3)
        model_1 = Model(input_1, output_1)
        print(model_1.summary())

        input_2 = Input(shape=(4, num_words))
        process_4 = TimeDistributed(model_1)(input_2)
        process_5 = Flatten()(process_4)
        process_6 = Dense(1)(process_5)
        output_2 = Activation('sigmoid')(process_6)
        model_2 = Model(input_2, output_2)
        print(model_2.summary())

        train_generator = DataGenerator(self.train, self.train_labels,
                                        text_tokenizer, inv_map, k_means,
                                        embed, num_words)
        valid_generator = DataGenerator(self.valid, self.valid_labels,
                                        text_tokenizer, inv_map, k_means,
                                        embed, num_words)

        model_2.compile(optimizer='Adam',
                        loss='binary_crossentropy',
                        metrics=['accuracy'])
        model_2.fit_generator(generator=train_generator,
                              validation_data=valid_generator,
                              epochs=2,
                              verbose=1)

        # a = model_2.predict_generator(data_yield_1(valid_sequences,valid_results,valid_clusters,992),steps=62)
        #print(a[:10])
        #a = np.argmax(a,axis=1)
        #b = np.argmax(valid_results,axis=1)
        #print(a[:100],valid_results[:10],b[:10])
        #print(classification_report(b,a))
        return model_2
Example #11
class TestDataset(data.Dataset):
    def __init__(self, env, cur_length, full_dataset_length, max_length,
                 num_samples, epsilon):
        self.env = env
        self.cur_length = cur_length
        self.max_length = max_length
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.full_dataset_length = full_dataset_length

        self.generator = DataGenerator(env, epsilon)
        self.states_full, self.actions, _, _ = self.generator.calc_data_full(
            self.full_dataset_length)
        self.reinitialize()

    def reinitialize(self):
        n = self.cur_length - self.full_dataset_length
        if n > 0:
            self.states_rand, _, _ = self.generator.calc_data_rand(
                self.states_full[-1].view(-1, 2, 2, 2), self.actions, n)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, _):
        length = torch.randint(0, self.cur_length, ())
        if length < self.full_dataset_length:
            idx = torch.randint(0, len(self.states_full[length]), ())
            state = self.states_full[length][idx]
            return {'state': state, 'length': length + 1}
        else:
            length = length - self.full_dataset_length
            idx = torch.randint(0, len(self.states_rand[length]), ())
            state = self.states_rand[length][idx]
            return {
                'state': state,
                'length': length + self.full_dataset_length + 1
            }
Example #12
def main(inputs_path='data/inputs/',
         targets_path='data/5-shot-targets/',
         save_confusion=True):
    """Convolutional Neural Network Creation and Evaluation. Uses data collected from the Physiobank data set and stored
    in the specified input and target folders"""

    print("Running...")

    model = define_model()

    train_files, test_files, validation_files = get_input_file_names(
        inputs_path)

    # generate training/test/validation data in batches
    train_generator = DataGenerator(inputs_path, targets_path, train_files)
    validation_generator = DataGenerator(inputs_path, targets_path,
                                         validation_files)
    test_generator = DataGenerator(inputs_path, targets_path, test_files)

    # run model training and evaluation
    es = EarlyStopping(monitor='val_acc',
                       mode='max',
                       patience=5,
                       verbose=1,
                       restore_best_weights=True)
    history = model.fit_generator(train_generator,
                                  validation_data=validation_generator,
                                  epochs=100,
                                  verbose=1,
                                  callbacks=[es])
    _, accuracy = model.evaluate_generator(test_generator, verbose=0)

    # create test set and targets
    x_test, y_test = [], []
    test_generator.on_epoch_end()
    for i in range(len(test_generator)):
        x_batch, y_batch = test_generator[i]
        x_test.extend(x_batch)
        y_test.extend(y_batch)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    y_prediction = model.predict(x_test)

    save_model(model)

    evaluate_model(model, history, accuracy, y_test, y_prediction,
                   save_confusion)

    return None
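Building x_test in memory keeps the targets aligned with the predictions, but can be costly for large test sets. Under the assumption that the generator yields batches in a fixed order (no reshuffling between the collection pass and prediction), roughly the same result can be obtained without materializing the inputs:

# Sketch (assumption: test_generator does not reshuffle, so predictions and the
# collected targets stay aligned):
y_prediction = model.predict_generator(test_generator, verbose=0)
y_test = np.concatenate([test_generator[i][1] for i in range(len(test_generator))])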
Example #13
tf.app.flags.DEFINE_string('rnn_type', default_value='uni-dir',
                           docstring='''uni-dir or bi-dir.''')
tf.app.flags.DEFINE_float('initial_lr', default_value=0.00001,
                           docstring='''Initial learning rate for training.''')
tf.app.flags.DEFINE_integer('num_filters', default_value=64,
                           docstring='''Number of convolutional filters.''')
tf.app.flags.DEFINE_float('moving_avg_decay', default_value=0.9999,
                           docstring='''Decay to use for the moving average of weights.''')
tf.app.flags.DEFINE_integer('num_epochs_per_decay', default_value=5,
                           docstring='''Epochs after which learning rate decays.''')
tf.app.flags.DEFINE_float('lr_decay_factor', default_value=0.9,
                           docstring='''Learning rate decay factor.''')

logger.info('Running model for the following parameters:')

train_data = DataGenerator(FLAGS.data_dir, FLAGS.batch_size)
valid_data = DataGenerator('C:\\Users\\user\\Desktop\\tfVirtEnv\\deepBrain\\data\\json\\Patient1\\valid.json', FLAGS.batch_size)

num_channels = 32 # compute from data

def inference_graph(inputs, seq_len, batch_size, prob, graph):
    '''
    Function to create the model graph for training and evaluation.
    :inputs: ECoG ndarray of shape (batch_size, T, CH)
    :seq_len: list (shape = (batch_size)) of ints holding the length of each sequence
    :train: bool indicating whether the graph is used for training or evaluation.
            If False, the dropout layer is deactivated.
    :returns: tf.graph, logits
    '''
    
    num_channels = 32 # compute from data
Example #14
                    self.pool_of_ack.remove(ack)

    def get_all_peers(self):
        peers = []
        with open('peers_info.json') as f:
            peers_json = json.load(f)
        for port, name in peers_json.items():
            peers.append([name, port])
        return peers

    def broadcast_data(self, data, type):
        all_nodes = self.get_all_peers()
        for node in all_nodes:
            self.send_data(node, data, type)

    def run(self):
        receiving_thread = threading.Thread(target=self.receive_data)
        receiving_thread.start()
        ack_thread = threading.Thread(target=self.send_ack_to_peers)
        ack_thread.start()
        sync_thread = threading.Thread(target=self.sync_with_peers)
        sync_thread.start()


p1 = Node("peer1")
p2 = Node("peer2")
data_generator = DataGenerator()

# node1.send_data(['127.0.0.1', node2.port], "data")
# n2.broadcast_data("Im node 2 and msg is broadcasted")
Example #15
def main(trainF, testF, validF, modelF):
    batch_size = 32
    num_classes = 2
    epochs = 100
    patience = 5  # int(epochs/20)

    targetIdx = range(3, 5)
    currentStateIdx = range(1, 3) # Includes number of time steps

    # input image dimensions
    img_rows, img_cols = 128, 96

    # Parameters
    params = {'dim': (img_rows, img_cols, 3),
              'batch_size': batch_size,
              'n_channels': 3,
              'preTrain': preTrain,
              'shuffle': True}

    # File name
    partition = readInp(trainF, testF, validF)
    trainF = partition['train'][:, 0]
    testF = partition['test'][:, 0]
    validF = partition['valid'][:, 0]
    trainTarget = partition['train'][:, targetIdx].astype(float)
    testTarget = partition['test'][:, targetIdx].astype(float)
    validTarget = partition['valid'][:, targetIdx].astype(float)

    # Current state position
    trainCurrState = partition['train'][:, currentStateIdx].astype(float)
    testCurrState = partition['test'][:, currentStateIdx].astype(float)
    validCurrState = partition['valid'][:, currentStateIdx].astype(float)

    training_generator = DataGenerator(trainF, trainCurrState, trainTarget, **params)
    valid_generator = DataGenerator(validF, validCurrState, validTarget, **params)

    #
    #testparams = {'dim': (img_rows, img_cols, 3),
    #              'batch_size': 64,
    #              'n_channels': 3,
    #              'shuffle': False}
    #test_generator = DataGenerator(testF, testTarget, **testparams)

    # Input that stores the previous step
    model = spatialModel(img_rows, img_cols)
    model.compile(loss=keras.losses.mse,
                  optimizer=keras.optimizers.Adam(lr=0.001),
                  metrics=['mse', 'mae'])

    #pre_dict = evaluate(model, modelF + '.h5', valid_generator)
    #truedict = dict(zip(validF, validTarget))
    #for i in range(36):
    #    true = np.zeros((19, 2), dtype=float)
    #    pred = np.zeros((19, 2), dtype=float)
    #    for j in range(1, 19):
    #       key = './output//valid//input%d.lp_%d.png'%(i+80, j-1)
    #       true[j, :] = truedict[key]
    #       pred[j, :] = pre_dict[key]
    #
    #    pred = np.cumsum(pred, axis=0)
    #    true = np.cumsum(true, axis=0)
    #    pred = np.array(pred)
    #    true = np.array(true)
    #
    #    import matplotlib.pyplot as plt
    #    plt.scatter(pred[:, 0], pred[:, 1], c='r')
    #    plt.scatter(true[:, 0], true[:, 1], c='g')
    #    plt.show()

    early_stopping = EarlyStopping(monitor='mean_squared_error', patience=patience)
    history = model.fit_generator(generator=training_generator,
                                  #validation_data=valid_generator,
                                  #callbacks=[early_stopping,],
                                  use_multiprocessing=False,
                                  epochs=epochs, verbose=1, workers=1)

    model.save(modelF)
    del model
    print('Done saving model: %s'%modelF)
    return
Example #16
    mse_x = ((p3d_out_17x3[:, :, 0] - p3d_gt_17x3[:, :, 0]) * p3d_std)**2
    mse_y = ((p3d_out_17x3[:, :, 1] - p3d_gt_17x3[:, :, 1]) * p3d_std)**2
    mse_z = ((p3d_out_17x3[:, :, 2] - p3d_gt_17x3[:, :, 2]) * p3d_std)**2

    mean_mse_xy = tf.reduce_mean(tf.sqrt(mse_x + mse_y))
    mean_mse_xz = tf.reduce_mean(tf.sqrt(mse_x + mse_z))
    mean_mse_yz = tf.reduce_mean(tf.sqrt(mse_y + mse_z))

    return p3d_out_17x3[0, 2, 0]


tf.enable_eager_execution()
with tf.Session(config=config) as sess:

    trainGenerator = DataGenerator(DATA_PATH,
                                   batch_size=BATCH_SIZE,
                                   val_fraction=VAL_FRACTION,
                                   name="train")
    valGenerator = DataGenerator(DATA_PATH,
                                 batch_size=BATCH_SIZE,
                                 val_fraction=VAL_FRACTION,
                                 name="val")
    # define model
    model = get_model(batch_size=BATCH_SIZE)
    if LOAD:
        latest = tf.train.latest_checkpoint("./log/test8/saved_models/retrain")
        print("loading weight from", latest)
        model.load_weights(latest)

    # optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
    # saver
Example #17
                            pass
    print("Found " + str(len(labels)) + " labels.")
    outputAges = []
    for label in list(labels.values()):
        for i in range(len(ageClassLabels)):
            if ageClassLabels[i][0] <= label[0] <= ageClassLabels[i][1]:
                outputAges.append(oneHotEncode(i, len(ageClassLabels)))
                break
    outputAges = np.array(outputAges, dtype=float)

    print("Binned Ages into classes. Shape: " + str(outputAges.shape))

    imageIDs = np.array(list(labels.keys()))

    trainData, testData = DataGenerator(np.array(list(labels.keys())),
                                        outputAges, 100).split(validationSplit)

    ConvTemplate = functools.partial(
        tf.keras.layers.Conv2D,
        bias_initializer=tf.keras.initializers.lecun_normal(),
        kernel_initializer=tf.keras.initializers.lecun_normal(),
        activation=tf.keras.activations.relu)
    ConvTemplate.__name__ = 'ConvTemplate'

    def LayerTemplate(filters, kernel_size):
        def Layer(x):
            return tf.keras.layers.BatchNormalization()(
                ConvTemplate(filters=filters, kernel_size=kernel_size)(x))

        return Layer
def main():
    ########################################################
    # Initialize
    ########################################################
    global keysPressed
    dataIndex = 0

    EpisodicMemCap = 100
    dataIndex_episode = pickle.load(
        open("trainingData/episodeData/dataIndex_episode.p", "rb"))

    # Load model
    WIDTH = HEIGHT = 224
    FILENUM = pickle.load(
        open("trainingData/Unbalanced_rgb_299/dataIndex_{}.p".format('both'),
             "rb"))
    #FILENUM_both = 458
    model = get_Keras_model('both', WIDTH, HEIGHT, 8, "AdamReg", FILENUM)

    # Setup for screenGrab
    last_time = time.time()
    W, H = 575, 525
    dsp = display.Display()
    screen = dsp.screen()
    root = dsp.screen().root

    countDown(5)

    while True:
        ########################################################
        #Reinforcement Loop
        ########################################################

        isSolved = False
        trainingData = []

        while not isSolved:
            """
            During the puzzle, this loop keeps playing until it is determined
            that the puzzle has been solved.
            """
            ############################
            # Get Image
            ############################
            image = grabscreen(root, W, H)
            image_rs = cv2.resize(image, (WIDTH, HEIGHT))

            ############################
            # Make Predictions
            ############################
            body_moves = [0, 0, 0, 0, 0]
            head_moves = [0, 0, 0, 0, 0]
            p_body, p_head = model.predict(
                [image_rs.reshape(1, WIDTH, HEIGHT, 3)])
            # This ensures that, on each frame, the agent picks the most likely option.
            body_m_index = np.argmax(p_body[0])
            head_m_index = np.argmax(p_head[0])

            body_moves[body_m_index] = 1
            head_moves[head_m_index] = 1

            print(body_moves)
            print(head_moves)

            ############################
            # See if Supervisor overrides
            ############################
            trainingData = SupervisorCheck(cv2.resize(image,
                                                      (299, 299)), body_moves,
                                           head_moves, trainingData)

            print('loop took {:0.3f} seconds'.format(time.time() - last_time))
            last_time = time.time()

            ############################
            # Store episodic memory to file.
            ############################
            if len(trainingData) == 100:
                # Sets of 100 so I don't need to preprocess.
                print(dataIndex)
                shuffle(trainingData)  # Just randomize it now.
                dataIndex += 1

                #Write to new datafile. Formatted to fit with current dataGenerator code.
                np.save(
                    "trainingData/episodeData/episodeData/data_{}".format(
                        dataIndex), trainingData)

                # Restarts array for next batch
                trainingData = []

            if 'm' in keysPressed or 'M' in keysPressed:
                # My manual way to break the loop
                isSolved = True
            """
            Needs to be there for the async hook and this synced loop
            to keep up with each other
            """
            time.sleep(0.01)

            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break

        print("Number of frames in Episodic Memory: {}".format(dataIndex))

        #Stop the agent
        move_body([0, 0, 0, 0, 1])
        move_head([0, 0, 0, 0, 1])

        if dataIndex > EpisodicMemCap:
            ########################################################
            # Retrain the Agent's Policy
            ########################################################

            for i, layer in enumerate(model.layers):
                print(i, layer.name)

            # Only train the last dense layers that I personally added to VGG16.
            Layer_End = 19
            for layer in model.layers[:Layer_End]:
                layer.trainable = False
            for layer in model.layers[Layer_End:]:
                layer.trainable = True

            # Training Setup
            EPOCHS = 4
            DTYPE = DATA_TYPE = 'episodeData'

            TrainLen = dataIndex - 10
            Indices = np.array(range(1, dataIndex + 1))
            shuffle(Indices)
            TrainIndex = Indices[:TrainLen]
            ValidIndex = Indices[TrainLen:]

            params = {
                'WIDTH': WIDTH,
                'HEIGHT': HEIGHT,
                'DTYPE': DTYPE,
                'DATA_TYPE': DATA_TYPE,
                'isConcat': True,
                'batch_size': 1,
                'shuffle': True
            }

            training_generator = DataGenerator(TrainIndex, **params)
            validation_generator = DataGenerator(ValidIndex, **params)

            # Defining my callbacks:
            filepath = "models/SupervisedReinforcement/best_weights.h5"
            checkpoint = ModelCheckpoint(filepath,
                                         monitor='val_dense_5_acc',
                                         verbose=1,
                                         save_best_only=True,
                                         mode='max')
            callbacks_list = [checkpoint]

            # compile the model (should be done *after* setting layers to non-trainable)
            model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                          loss='categorical_crossentropy',
                          metrics=['acc'])
            #model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['acc'])

            # Train the model on the new data for a few epochs
            model.fit_generator(generator=training_generator,
                                validation_data=validation_generator,
                                callbacks=callbacks_list,
                                use_multiprocessing=True,
                                workers=6,
                                epochs=EPOCHS,
                                steps_per_epoch=dataIndex)

            #Reload the best weights from that update session
            model.load_weights(
                "models/SupervisedReinforcement/best_weights.h5")

            # Save data rather than delete because it is like 10k data samples

            dataIndex_episode += 1
            shutil.move(
                "trainingData/episodeData/episodeData",
                "trainingData/episodeData/episodeDataArchived/E{}".format(
                    dataIndex_episode))
            subprocess.call(['mkdir', 'trainingData/episodeData/episodeData'])
            pickle.dump(
                dataIndex_episode,
                open("trainingData/episodeData/dataIndex_episode.p", "wb"))

            dataIndex = 0

        else:
            # wait till I tell it to start testing again.
            testStart = False

            while not testStart:
                if 'n' in keysPressed or 'N' in keysPressed:
                    testStart = True
import h5py
#%% train generator
paramsTrain = {
    'trackName': 'skank',
    'encoding': 'one-hot',
    'batch_size': 32,
    'maxLength': 16,
    'timesteps': 8,
    'seqOverlap': 'max',
    'shuffle': True,
    'dataAugmentation': True
}

paramsTrain['datasetPath'] = './dataset/'

trainGenerator = DataGenerator(**paramsTrain)

#%% model

# hyperparameters
timesteps = trainGenerator.timesteps
nFeat = trainGenerator.nFeatures
lstmUnits = 64
lstmAct = 'tanh'
outputUnits = nFeat
dropout = 0.2
nEpochs = 1
lr = 10e-3

# optimizer
Adam = tf.keras.optimizers.Adam(lr)
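The excerpt stops after defining the optimizer. A hedged sketch of how the hyperparameters above might be assembled into a model and trained on the generator follows; the layer layout, loss, and the assumption that trainGenerator is a keras.utils.Sequence are illustrative, not the project's actual code.

# Hypothetical continuation using the hyperparameters defined above.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(lstmUnits, activation=lstmAct, dropout=dropout,
                         input_shape=(timesteps, nFeat)),
    tf.keras.layers.Dense(outputUnits, activation='softmax'),
])
model.compile(optimizer=Adam, loss='categorical_crossentropy')
# Assumes trainGenerator behaves like a keras.utils.Sequence.
model.fit(trainGenerator, epochs=nEpochs)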
Example #20
                                                                 image_type = gen_param['imgType'],
                                                                 label_type = gen_param["labelType"])

    train_ids["train"] = train_ids["train"][:args.n_samples]

    # Test set remains the same
    test_ids = generatePartitionTrainAndValidFromFolderRandomly(imagePathFolder = val_param['imgPath'],
                                                                _validProportion = val_param['validprop'],
                                                                shuffle_train_val = val_param['shuffletrain'],
                                                                threshold=20,
                                                                labelPathFolder = val_param['labelPath'],
                                                                image_type = val_param['imgType'],
                                                                label_type = val_param["labelType"])

    # Generate data generator objects.
    training_generator   = DataGenerator(train_ids['train'], gen_param['imgPath'], gen_param['labelPath'], norm_param, gen_param["augment"], aug_param, **misc_param_gen)
    validation_generator = DataGenerator(test_ids['validation'], val_param['imgPath'], val_param['labelPath'], norm_param, val_param["augment"], aug_param, **misc_param_val)

    # Generate the model.
    model = load_model(args.path + "model.h5", compile=False)
    model.summary()

    # Set up  metrics.
    metrics = [dice_coef_background, dice_coef_first_label]
    # Set up loss function
    lossfunc = create_loss_func(mp)
    opt = Adam(lr=mp["learningrate"])
    model.compile(optimizer=opt, loss=lossfunc, metrics=metrics)

    # Print report prior to fit of the model.
    print_report(p)
preProc.saveArticles()

# Initialising the RNN
regressor = Sequential()

# Adding the input layerand the LSTM layer
regressor.add(
    LSTM(units=32,
         activation='relu',
         input_shape=(preProc.seq_length, preProc.data_dim)))

# Adding the output layer
regressor.add(Dense(units=1))

# Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')

list_IDs = range(preProc.num_ts * preProc.num_articles)

batch_size = 32

# Initializing the generator
training_generator = DataGenerator(batch_size=batch_size).generate(list_IDs)

# Fitting the RNN to the Training set
regressor.fit_generator(generator=training_generator,
                        steps_per_epoch=len(list_IDs) // batch_size,
                        epochs=10)

regressor.save('model_full_data_7ts_32u_lstm_adam_32b_10e_sh')
Example #22
import pickle
from dataGenerator import DataGenerator
from utils import visualize_2D_trip

last = 19000
i = 19100
seed_ = 19053
with open("supervised_data/batch_seed_" + str(last) + "_" + str(i) + ".pkl",
          'rb') as handle:
    b = pickle.load(handle)

dataset = DataGenerator()
input_batch, dist_batch = dataset.create_batch(256, 50, 2, seed=seed_)

visualize_2D_trip(input_batch[100], b[seed_][100], 'niente', 1)
Example #23
def train(in_dir,
          hdf5_dir,
          hdf5_name,
          dim_patch=224,
          pre_trained='',
          rs_rate=4,
          balancing=False):
    """
    Training pipeline
    Performing training and evaluation of the tiramisu model on the given dataset
    The dataset should be loaded into a hdf5 file
    """
    # ------------------------------------------------------------------------------------------------------------------- #
    # Read the loaded HDF5 file
    hdf5_file = h5py.File(hdf5_dir + hdf5_name + '.hdf5', 'r')

    # ------------------------------------------------------------------------------------------------------------------- #
    # Generate batch sample
    # settings
    batch_size = 10
    num_fusion = 0  # the number of previous/following patch to be fused in the input

    params_train = {
        'hdf5_file': hdf5_file,
        'dim_x': dim_patch,
        'dim_y': dim_patch,
        'dim_z': 3,
        'batch_size': batch_size,
        'num_fusion': num_fusion,
        'tag': 'Train',
        'aug': rs_rate,
        'balancing': balancing
    }

    params_val = {
        'hdf5_file': hdf5_file,
        'dim_x': dim_patch,
        'dim_y': dim_patch,
        'dim_z': 3,
        'batch_size': batch_size,
        'num_fusion': num_fusion,
        'tag': 'Val'
    }

    train_setting = DataGenerator(**params_train)
    val_setting = DataGenerator(**params_val)

    # calculate class weighting
    class_weighting = train_setting.class_weight
    num_train = len(train_setting.crop_list)
    num_val = len(val_setting.crop_list)

    # Generators
    training_generator = train_setting.generate()
    validation_generator = val_setting.generate()

    # ------------------------------------------------------------------------------------------------------------------- #
    # record the settings,training process,results in a file
    import time
    curr_date = time.strftime("%d/%m/%Y")
    curr_time = time.strftime("%H:%M:%S")
    file_id = curr_date[0:2] + curr_date[3:5] + '_' + curr_time[0:2]

    # file_id = 'test'
    # create a new folder
    newPath = '../Result/' + file_id + '/'  # trailing separator so output files land inside the folder created below

    try:
        os.makedirs(newPath)
    except OSError:
        if not os.path.isdir(newPath):
            raise

    np.save(newPath + 'mean_train', train_setting.mean)

    text_file = open(newPath + 'Log.txt', "w")
    text_file.write("Date: %s\n" % curr_date)
    text_file.write("Start time: %s\n\n" % curr_time)

    # ------------------------------------------------------------------------------------------------------------------- #
    # load the model (and weights):
    model_id = hdf5_name
    # Tiramisu_3D().create([dim_patch, dim_patch], [4, 6, 8], 10, [8, 6, 4], 12, 0.0001, 0.5, 5, model_id)
    Tiramisu().create([dim_patch, dim_patch], [4, 6, 8], 10, [8, 6, 4], 12,
                      0.0001, 0.5, 5, model_id)

    load_model_name = '../Model/tiramisu_fc_dense_' + model_id + '.json'
    with open(load_model_name) as model_file:
        tiramisu = models.model_from_json(model_file.read())
        if len(pre_trained) > 0:
            tiramisu.load_weights(pre_trained, by_name=True)

    # specify optimizer
    optimizer = Nadam(lr=0.0005)

    # number of epochs
    nb_epoch = 20

    # metrics using accuracy or IoU
    tiramisu.compile(loss="categorical_crossentropy",
                     optimizer=optimizer,
                     metrics=["accuracy"])

    # checkpoint 278
    TensorBoard = callbacks.TensorBoard(log_dir='./logs',
                                        histogram_freq=5,
                                        write_graph=True,
                                        write_images=True)

    filePath = newPath + 'prop_tiramisu_weights_' + model_id + '.best.hdf5'
    checkpoint = ModelCheckpoint(filePath,
                                 monitor='val_acc',
                                 verbose=2,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='max')
    # training stops when the validation accuracy has not improved for 4 epochs (the patience below)
    earlyStopping = EarlyStopping(monitor='val_acc',
                                  patience=4,
                                  verbose=1,
                                  mode='max')

    callbacks_list = [checkpoint, earlyStopping]

    # ------------------------------------------------------------------------------------------------------------------- #
    # record the settings in a text file
    with open('../Model/Setting.txt', 'r') as model_setting_log:
        model_setting = model_setting_log.read()

    text_file.write(model_setting)
    text_file.write("\nData set: C1_17\n")
    text_file.write('\nBalancing: %s' % str(train_setting.balancing))
    text_file.write('\nSampling rate: %d' % train_setting.aug)

    text_file.write("\nclass weights:\n")
    for w_item in class_weighting:
        text_file.write("%f\n" % w_item)

    text_file.write("\n# training data: %d\n" % num_train)
    text_file.write("# validation data: %d\n" % num_val)
    text_file.write("model: %s\n" % load_model_name)
    # text_file.write("loaded the weights: %s\n\n"%"1303_19/prop_tiramisu_weights_69.best.hdf5")

    text_file.write("optimizer = Nadam(lr=0.0005)\n")
    text_file.write("loss function: categorical_crossentropy\n\n")
    text_file.write("# epoch: %d\n batch size: %d\n" % (nb_epoch, batch_size))
    text_file.write("weights stored in: %s\n" % filePath)
    text_file.write("number of stacks: %d\n" % (num_fusion * 2 + 1))
    text_file.write("no floating ice in training set")
    text_file.close()

    # ------------------------------------------------------------------------------------------------------------------- #
    # Fit the model
    history = tiramisu.fit_generator(
        generator=training_generator,  # gen(train_setting),
        steps_per_epoch=math.ceil(num_train / float(batch_size)),
        validation_data=validation_generator,  # gen(val_setting),
        validation_steps=math.ceil(num_val / float(batch_size)),
        epochs=nb_epoch,
        verbose=1,
        callbacks=callbacks_list)
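    # Note (not in the original code): class_weighting computed above is only written
    # to the log file; if it is meant to weight the loss, fit_generator also accepts
    # a class_weight argument, e.g. class_weight=class_weighting in the call above.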

    # Save the trained model weights to a file whose name records the number of epochs
    tiramisu.save_weights(newPath + 'prop_tiramisu_weights_' + model_id +
                          '_{}.hdf5'.format(nb_epoch))

    # ------------------------------------------------------------------------------------------------------------------- #
    # plot and save training history
    import matplotlib.pyplot as plt
    # list all data in history
    print(history.history.keys())
    # summarize history for accuracy
    plt.figure(1)
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(newPath + 'acc.png')
    # summarize history for loss
    plt.figure(2)
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(newPath + 'loss.png')

    # save history to numpy array
    log_history = [[]] * 4
    log_history[0] = np.array(history.history['loss'])
    log_history[1] = np.array(history.history['val_loss'])
    log_history[2] = np.array(history.history['acc'])
    log_history[3] = np.array(history.history['val_acc'])

    np.save(newPath + 'Train_history', log_history)

    hdf5_file.close()
Example #24
    RADIUS = 750  # space around the subject
    xroot, yroot, zroot = vals[0, 0], vals[0, 1], vals[0, 2]
    ax.set_xlim3d([-RADIUS + xroot, RADIUS + xroot])
    ax.set_zlim3d([-RADIUS + zroot, RADIUS + zroot])
    ax.set_ylim3d([-RADIUS + yroot, RADIUS + yroot])

    ax.view_init(elev=-90., azim=-90)

    ax.get_xaxis().set_ticklabels([])
    ax.get_yaxis().set_ticklabels([])
    ax.set_zticklabels([])
    ax.set_aspect('equal')


Generator = DataGenerator(DATA_PATH, batch_size=BATCH_SIZE, name="train")
images = Generator.__getitem__(0)[0]
#annotations = unnormalize_pose_numpy(p3d,0,2*1100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = "0"

with tf.Session(config=config) as sess:
    if PREDICT:
        model = get_model(batch_size=BATCH_SIZE)
        # Load weights into the new model
        latest = tf.train.latest_checkpoint(SAVE_PATH)
        print("weights: ", latest)
        model.load_weights(latest)
        p3d_out, _ = model.predict(x=images, verbose=1)
        p3d_out = unnormalize_pose_numpy(p3d_out, 0, 1100 * 2)
Example #25
def train_network(name, the_model, config, train, test, LABELS, num_cores):
    PREDICTION_FOLDER = 'models_for_top_five_cat'  #"predictions_1d_conv_output_2"
    MODEL_PATH = PREDICTION_FOLDER + '/' + name + '/'
    if not os.path.exists(PREDICTION_FOLDER):
        os.mkdir(PREDICTION_FOLDER)
    if not os.path.exists(PREDICTION_FOLDER + '/' + name):
        os.mkdir(PREDICTION_FOLDER + '/' + name)
    if os.path.exists('../logs/' + PREDICTION_FOLDER + name + '/'):
        shutil.rmtree('../logs/' + PREDICTION_FOLDER + name + '/')
    skf = StratifiedKFold(n_splits=config.n_folds)
    splits = skf.split(train.label, train.actual_label_idx)
    fold_acc = []
    for i, (train_split, val_split) in enumerate(splits):
        train_set = train.iloc[train_split]
        val_set = train.iloc[val_split]
        checkpoint = ModelCheckpoint(MODEL_PATH + 'best_%d.h5' % i,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True)
        early = EarlyStopping(monitor="val_loss", mode="min", patience=3)
        tb = TensorBoard(log_dir='./logs/' + name + '/' + '/fold_%d' % i,
                         write_graph=True)
        callbacks_list = [checkpoint, early, tb]
        print("Fold: ", i)
        print("#" * 50)
        model = the_model(config)
        train_generator = DataGenerator(config,
                                        'audio_train/',
                                        train_set.index,
                                        train_set.actual_label_idx,
                                        batch_size=64,
                                        preprocessing_fn=audio_norm)
        val_generator = DataGenerator(config,
                                      'audio_train/',
                                      val_set.index,
                                      val_set.actual_label_idx,
                                      batch_size=64,
                                      preprocessing_fn=audio_norm)

        history = model.fit_generator(train_generator,
                                      callbacks=callbacks_list,
                                      validation_data=val_generator,
                                      epochs=config.max_epochs,
                                      use_multiprocessing=True,
                                      workers=num_cores,
                                      max_queue_size=20)

        model.load_weights(MODEL_PATH + 'best_%d.h5' % i)

        # Save train predictions
        train_generator = DataGenerator(config,
                                        'audio_train/',
                                        train.index,
                                        batch_size=128,
                                        preprocessing_fn=audio_norm)
        predictions = model.predict_generator(train_generator,
                                              use_multiprocessing=True,
                                              workers=num_cores,
                                              max_queue_size=20,
                                              verbose=1)
        np.save(MODEL_PATH + "train_predictions_%d.npy" % i, predictions)

        # Save test predictions
        test_generator = DataGenerator(config,
                                       'audio_train/',
                                       test.index,
                                       batch_size=128,
                                       preprocessing_fn=audio_norm)
        predictions = model.predict_generator(test_generator,
                                              use_multiprocessing=True,
                                              workers=num_cores,
                                              max_queue_size=20,
                                              verbose=1)
        np.save(MODEL_PATH + "test_predictions_%d.npy" % i, predictions)
        # Make a submission file
        top_3 = np.array(LABELS)[np.argsort(-predictions, axis=1)[:, :1]]
        predicted_labels = [' '.join(list(x)) for x in top_3]
        test['predicted_label'] = predicted_labels
        num_wrong = 0
        for entry in test.values:
            if (entry[2] != entry[0]):
                num_wrong += 1
        fold_acc.append((1 - (num_wrong / test.shape[0])) * 100)

    #     test[['pred_label']].to_csv(PREDICTION_FOLDER + "/predictions_%d.csv"%i)
    f = open(MODEL_PATH + 'fold_accuracies.csv', 'a+')
    for i in range(len(fold_acc)):
        val = "Fold_" + str(i) + ", " + str(fold_acc[i]) + '\n'
        f.write(val)
    val = "Avg_accuracy, " + str(sum(fold_acc) / len(fold_acc))
    f.write(val)
    f.close()
Example #26
    for cv_index in range(len(chunks)):
        trainset = []
        for i in range(len(chunks)):
            if not i == cv_index:
                trainset = trainset + chunks[i]
        valset = chunks[cv_index]

        partition = dict()
        partition['train'] = trainset
        partition['validation'] = valset

        # Generate data generator objects.
        training_generator = DataGenerator(partition['train'],
                                           gen_param['imgPath'],
                                           gen_param['labelPath'], norm_param,
                                           gen_param['augment'], aug_param,
                                           **misc_param)
        validation_generator = DataGenerator(partition['validation'],
                                             gen_param['imgPath'],
                                             gen_param['labelPath'],
                                             norm_param, gen_param['augment'],
                                             aug_param, **misc_param)

        # Generate the model.
        n_rows = mp['x_end'] - mp['x_start']
        n_cols = mp['y_end'] - mp['y_start']
        n_slices = mp['z_end'] - mp['z_start']
        model = UNet_3D(input_shape=(n_rows, n_cols, n_slices, mp['channels']),
                        nb_labels=mp['labels'],
                        filters=mp['features'],
                        depth=mp['depth'],
Example #27
SPACE = " "
SPECIAL_CHARS = "?!,."
ALPHANUMERIC = string.printable[:62]
CHARS = ALPHANUMERIC + SPECIAL_CHARS + SPACE


INPUT_SOURCE_NAME = "iam_word"
BATCH_SIZE = 16
MAX_TEXT_LENGTH = 128
CHARSET_BASE = CHARS


dtgen = DataGenerator(
    source=f"/home/kuadmin01/terng/Dataset/dataset_filter.hdf5",
    batch_size=BATCH_SIZE,
    charset=CHARSET_BASE,
    max_text_length=MAX_TEXT_LENGTH,
    predict=False
)

INPUT_SHAPE = (1024, 128, 1)
OUTPUT_SHAPE = dtgen.tokenizer.vocab_size + 1

inputs, outputs = ResFlor(input_shape=INPUT_SHAPE, output_shape=OUTPUT_SHAPE)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.RMSprop(
    learning_rate=5e-4), loss=ctc_loss)
model.summary()

callbacks = Callback(INPUT_SOURCE_NAME, 'Flor')
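The ctc_loss passed to compile is not shown in this excerpt. A common way to define such a loss on top of keras.backend.ctc_batch_cost looks roughly like the sketch below; it assumes labels are padded with zeros and is not the project's actual implementation.

import tensorflow as tf
from tensorflow import keras

def ctc_loss(y_true, y_pred):
    # y_true: dense label batch padded with zeros; y_pred: per-timestep softmax output.
    batch_len = tf.shape(y_true)[0]
    input_length = tf.fill([batch_len, 1], tf.shape(y_pred)[1])
    label_length = tf.math.count_nonzero(y_true, axis=-1, keepdims=True, dtype='int32')
    return keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)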
Example #28
def pool_operation(input_b, it):
    from concorde.tsp import TSPSolver
    import os
    # Concorde
    solver = TSPSolver.from_data(input_b[it, :, 0] * 1000,
                                 input_b[it, :, 1] * 1000,
                                 norm="EUC_2D")
    # Find tour
    solution = solver.solve()
    return solution.tour


config = get_config()

dataset = DataGenerator()
real_tour_concorde = {}
pool = Pool(processes=8)
last = 10000
if not os.path.exists("supervised_data"):
    os.mkdir("supervised_data")

for i in tqdm(range(10000, 30000)):  # test instance
    seed_ = 1 + i
    reward, tour = [], []
    input_batch, dist_batch = dataset.create_batch(config.batch_size,
                                                   config.graph_dimension,
                                                   config.dimension,
                                                   seed=seed_)

    sys.stdout = open(os.devnull, "w")
Example #29
params = {
    'WIDTH': WIDTH,
    'HEIGHT': HEIGHT,
    'DTYPE': DTYPE,
    'DATA_TYPE': DATA_TYPE,
    'isConcat': True,
    'batch_size': 1,
    'shuffle': True
}

MODEL_NAME = 'pytalos_{}_{}_{}_{}_files_{}_epocs_{}_{}.h5'.format(
    DTYPE, ARCH, OPTIMIZER, FILENUM, EPOCHS_1, DATA_TYPE, LR)
model_path = "models/{}/{}".format(DTYPE, MODEL_NAME)

training_generator = DataGenerator(TrainIndex, **params)
validation_generator = DataGenerator(ValidIndex, **params)

# Define the model w/ Keras from their documentation on applications
base_model = VGG16(weights='imagenet',
                   include_top=False,
                   input_shape=(WIDTH, HEIGHT, 3))

# Add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# Add two fully connected layers

x = Dense(1024, activation='relu', kernel_regularizer=regularizers.l2(0.01))(x)

# Branch out to both modalities, body and head movement.
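The excerpt ends before the two branches are defined. A hedged sketch of how such a two-headed model is commonly wired from the shared Dense layer above follows; the layer names, sizes, optimizer, and loss are assumptions for illustration, not the project's code.

# Hypothetical continuation of the snippet above: one softmax head per modality.
body_out = Dense(5, activation='softmax', name='body_output')(x)
head_out = Dense(5, activation='softmax', name='head_output')(x)

model = Model(inputs=base_model.input, outputs=[body_out, head_out])
model.compile(optimizer='adam',  # placeholder optimizer
              loss='categorical_crossentropy',
              metrics=['acc'])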
Example #30
print(dir_actor)
print(config.version)

tf.reset_default_graph()
actor = Actor(config)  # Build graph

# Save & restore all the variables.
variables_to_save_all = [
    v for v in tf.global_variables() if 'Adam' not in v.name
]
saver_all = tf.train.Saver(var_list=variables_to_save_all,
                           keep_checkpoint_every_n_hours=1.0)

########################################## TRAIN #################################

dataset = DataGenerator()

tf.set_random_seed(123)
tf.random.set_random_seed(123)
temperature = 1.0
last = 10000

save_path = "saveSL/" + dir_actor
if not os.path.exists(save_path):
    os.makedirs(save_path)

restore_path = "saveSL/" + dir_actor + "/actor.ckpt"
with tf.Session() as sess:  # start session
    count_no_weights()
    sess.run(tf.global_variables_initializer())  # run initialize op