Code Example #1
import os

import numpy as np
from keras.utils import to_categorical

# cut_letter is a project helper that maps the key file to one integer
# label per trace (a sketch appears after Code Example #4).


def generate_load_data(signal_data_path, key_path, which_line, which_letter,
                       key_length, batch_size):
    """Yield signal/label batches indefinitely for Keras fit_generator.

    :param signal_data_path: folder holding one comma-separated trace per file
    :param key_path: key file from which per-trace labels are extracted
    :param which_line: line of the key file to read
    :param which_letter: which key letter (nibble) to use as the label
    :param key_length: total number of key letters on that line
    :param batch_size: number of traces per yielded batch
    :return: generator of ({'input_1': x}, {'dense_1': y}) batches
    """
    while True:
        signal_list = os.listdir(signal_data_path)
        # NOTE: os.listdir returns files in arbitrary order; if label order
        # matters, sort by the numeric part of the filename, e.g.:
        # signal_list.sort(key=lambda x: int(x[10:-4]))
        key_label = cut_letter(key_path, which_line, which_letter, key_length)

        if len(key_label) != len(signal_list):
            raise ValueError(
                'label/trace count mismatch: %d labels vs %d traces' %
                (len(key_label), len(signal_list)))

        indx_to_count = 0
        train_x_batch = np.zeros((batch_size, 35000, 1))
        train_y_batch = np.zeros((batch_size, 16))
        for indx, signal in enumerate(signal_list):
            # Each trace is one comma-separated line; drop the extra element
            # at index 35000 (e.g. from a trailing comma) before the string
            # fields are converted to float.
            wav = np.loadtxt(os.path.join(signal_data_path, signal),
                             delimiter=',',
                             dtype='str')
            wav = np.delete(wav, 35000)
            wav = wav.astype(np.float32)

            # expand_dims turns the (16,) one-hot vector into (1, 16) so it
            # can be written into the batch buffer.
            key_label_lb = to_categorical(key_label[indx], 16)
            key_label_lb = np.expand_dims(key_label_lb, axis=0)

            # Reshape the trace to (1, 35000, 1) for the 1-D conv input.
            # (axis=-1 here; axis=2 is out of range for a 1-D array in
            # current NumPy.)
            train_x = np.expand_dims(wav, axis=-1)
            train_x = np.expand_dims(train_x, axis=0)

            train_x_batch[indx_to_count] = train_x
            train_y_batch[indx_to_count] = key_label_lb

            if indx_to_count == batch_size - 1:
                # Batch is full: yield it, then reset the buffers. The dict
                # keys must match the model's input/output layer names.
                yield ({'input_1': train_x_batch}, {'dense_1': train_y_batch})
                train_x_batch = np.zeros((batch_size, 35000, 1))
                train_y_batch = np.zeros((batch_size, 16))
                indx_to_count = -1  # incremented back to 0 below

            indx_to_count += 1
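
Because the generator loops over the folder forever, it can be passed straight to Keras' fit_generator. A minimal usage sketch; the paths, batch size, and steps_per_epoch value are placeholders, and the model is assumed to have layers named input_1 and dense_1 to match the yielded dictionary keys:

train_gen = generate_load_data(signal_data_path='/path/to/traces',
                               key_path='/path/to/keys.txt',
                               which_line=0,
                               which_letter=0,
                               key_length=4 * 320,
                               batch_size=32)

model.fit_generator(train_gen,
                    steps_per_epoch=320 // 32,  # traces per epoch // batch
                    epochs=10)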
Code Example #2
import os

import numpy as np
from keras.callbacks import (CSVLogger, ModelCheckpoint, ReduceLROnPlateau,
                             TensorBoard)
from keras.layers import (BatchNormalization, Dense, Dropout,
                          GlobalMaxPooling1D, Input, MaxPooling1D)
from keras.models import Model
from keras.utils import multi_gpu_model, to_categorical
from sklearn.model_selection import train_test_split

# BLOCK, load_data and cut_letter are project helpers; a sketch of a
# compatible BLOCK follows after this example.


def resnet_model(trainFilePath,
                 testFilePath,
                 batch_size,
                 epochs,
                 name,
                 lr,
                 key_file,
                 which_line,
                 which_letter,
                 key_length,
                 load_weight=False,
                 weight_path=None,
                 evalONtest=True):
    # Stack of residual blocks with pooling; BLOCK is the project's 1-D
    # residual block (see the sketch after this example).
    input_tensor = Input(shape=(35000, 1))
    seq = input_tensor
    seq = BLOCK(seq, 64)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 64)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 128)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 128)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 256)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 256)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 512)
    seq = BatchNormalization(axis=1)(seq)
    seq = MaxPooling1D(2)(seq)

    seq = BLOCK(seq, 512)
    seq = BatchNormalization(axis=1)(seq)

    seq = Dropout(0.6)(seq)

    seq = GlobalMaxPooling1D()(seq)

    output_tensor = Dense(16, activation='softmax')(seq)

    model = Model(inputs=[input_tensor], outputs=[output_tensor])

    # Data-parallel training across 3 GPUs (requires 3 visible devices).
    model = multi_gpu_model(model, gpus=3)
    model.summary()

    if load_weight:
        model.load_weights(weight_path, by_name=True)

    from keras.optimizers import Adam
    model.compile(
        loss='categorical_crossentropy',  # cross-entropy as the loss
        optimizer=Adam(lr),
        metrics=['accuracy'])

    if evalONtest:
        # Build a parallel model on the same tensors for test-set
        # evaluation. NOTE: data_to_test is loaded here but never used
        # later in this snippet.
        test_model = Model(inputs=[input_tensor], outputs=[output_tensor])
        test_model.compile(
            loss='categorical_crossentropy',  # cross-entropy as the loss
            optimizer=Adam(lr),
            metrics=['accuracy'])
        data_to_test = load_data(testFilePath)

    print('Loading training data')
    data_to_train = load_data(trainFilePath)
    label_to_train = cut_letter(key_file, which_line, which_letter, key_length)
    label_to_train_lb = to_categorical(label_to_train, 16)

    train_x, test_x, train_y, test_y = train_test_split(data_to_train,
                                                        label_to_train_lb,
                                                        test_size=0.3,
                                                        shuffle=True)

    train_x = np.expand_dims(train_x, axis=2)
    test_x = np.expand_dims(test_x, axis=2)

    print('Starting training')

    learnratedecay = ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=8,
                                       verbose=1,
                                       mode='auto',
                                       epsilon=0.0001,
                                       cooldown=0,
                                       min_lr=0)
    base_dir = '/data/wuchenxi/allmodel/new_simeck_model/' + name
    os.makedirs(base_dir + '/model', exist_ok=True)
    os.makedirs(base_dir + '/csvlog', exist_ok=True)
    os.makedirs(base_dir + '/tensorboard', exist_ok=True)

    checkpointer = ModelCheckpoint(
        monitor='val_loss',
        filepath=base_dir + '/model/' + name + '.hdf5',
        verbose=1,
        save_best_only=True)
    picture_output = TensorBoard(
        log_dir=base_dir + '/tensorboard/' + name + '_log',
        histogram_freq=0,
        write_graph=True,
        write_grads=True,
        write_images=True)
    csvlog = CSVLogger(filename=base_dir + '/csvlog/' + name + '.csv',
                       separator=',',
                       append=False)

    # The EvaluateInputTensor test-set callback is disabled; use the same
    # callback list either way so `callback` is always defined before fit.
    callback = [checkpointer, picture_output, csvlog, learnratedecay]
    model.fit(train_x,
              train_y,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(test_x, test_y),
              shuffle=True,
              verbose=1,
              callbacks=callback)
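
Both versions of resnet_model call BLOCK, whose definition is not shown on this page. Below is a minimal sketch of a compatible 1-D residual block, assuming two same-padded convolutions and a 1x1 shortcut when the channel count changes; the kernel size, depth, and activation placement are assumptions, not the author's code:

from keras.layers import Activation, Conv1D, add


def BLOCK(seq, filters):
    # Two stacked 1-D convolutions form the residual branch.
    cnn = Conv1D(filters, 3, padding='same', activation='relu')(seq)
    cnn = Conv1D(filters, 3, padding='same')(cnn)
    # Match channel counts with a 1x1 convolution before the residual add.
    if int(seq.shape[-1]) != filters:
        seq = Conv1D(filters, 1, padding='same')(seq)
    seq = add([seq, cnn])
    return Activation('relu')(seq)

The 1x1 shortcut keeps the add shape-compatible whenever `filters` differs from the incoming channel count, which is exactly the situation at the 64-to-128, 128-to-256, and 256-to-512 transitions above.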
Code Example #3
import numpy as np
from keras.layers import Input, concatenate
from keras.models import Model
from sklearn.model_selection import train_test_split

# create_base_network, generate_triplet, triplet_loss, load_data, cut_letter
# and add_all_class_in_mem_return_ori_and_add_data are project helpers; a
# sketch of triplet_loss follows after this example. testFilePath and
# evalONtest are accepted for API compatibility but unused in this version.


def resnet_model(trainFilePath,
                 testFilePath,
                 batch_size,
                 epochs,
                 name,
                 lr,
                 key_file,
                 which_line,
                 which_letter,
                 key_length,
                 test_size,
                 use_add=False,
                 each_class_number=None,
                 choose_number=None,
                 load_weight=False,
                 weight_path=None,
                 evalONtest=True):
    # A triplet network replaces the softmax classifier of Code Example #2:
    # three inputs (anchor, positive, negative) share one encoder.
    anchor_input = Input((35000, 1), name='anchor_input')
    positive_input = Input((35000, 1), name='positive_input')
    negative_input = Input((35000, 1), name='negative_input')

    Shared_DNN = create_base_network([35000, 1])

    encoded_anchor = Shared_DNN(anchor_input)
    encoded_positive = Shared_DNN(positive_input)
    encoded_negative = Shared_DNN(negative_input)

    # Concatenate the three embeddings so the custom triplet loss can split
    # them back apart from a single output tensor.
    merged_vector = concatenate(
        [encoded_anchor, encoded_positive, encoded_negative],
        axis=-1,
        name='merged_layer')
    model = Model(inputs=[anchor_input, positive_input, negative_input],
                  outputs=merged_vector)

    model.summary()

    if load_weight:
        model.load_weights(weight_path, by_name=True)

    from keras.optimizers import Adam
    model.compile(
        loss=triplet_loss,  # custom triplet loss (sketch after this example)
        optimizer=Adam(lr))


    print('Loading training data')

    if use_add:
        # Augmented loading returns both the original and the synthesized
        # traces together with their integer labels.
        data_to_train, label_to_train = add_all_class_in_mem_return_ori_and_add_data(
            signal_data_path=trainFilePath,
            label_path=key_file,
            which_line=which_line,
            which_letter=which_letter,
            key_length=key_length,
            each_class_number=each_class_number,
            choose_number=choose_number)
    else:
        data_to_train = load_data(trainFilePath)
        label_to_train = cut_letter(key_file, which_line, which_letter,
                                    key_length)

    train_x, test_x, train_y, test_y = train_test_split(data_to_train,
                                                        label_to_train,
                                                        test_size=test_size,
                                                        shuffle=True)

    train_x = np.expand_dims(train_x, axis=2)
    test_x = np.expand_dims(test_x, axis=2)

    # Build (anchor, positive, negative) triplets from the labelled traces.
    X_train_triplet, X_test_triplet = generate_triplet(train_x,
                                                       train_y,
                                                       testsize=0.3,
                                                       ap_pairs=10,
                                                       an_pairs=10)
    print(X_train_triplet.shape)
    print(X_test_triplet.shape)

    # Split the triplet axis into separate (N, 35000, 1) input arrays.
    Anchor = X_train_triplet[:, 0, :, :].reshape(-1, 35000, 1)
    Positive = X_train_triplet[:, 1, :, :].reshape(-1, 35000, 1)
    Negative = X_train_triplet[:, 2, :, :].reshape(-1, 35000, 1)
    Anchor_test = X_test_triplet[:, 0, :, :].reshape(-1, 35000, 1)
    Positive_test = X_test_triplet[:, 1, :, :].reshape(-1, 35000, 1)
    Negative_test = X_test_triplet[:, 2, :, :].reshape(-1, 35000, 1)

    # The triplet loss ignores y_true, so dummy arrays are fed as labels;
    # only their first dimension has to match the number of triplets.
    Y_dummy = np.empty((Anchor.shape[0], 300))
    Y_dummy2 = np.empty((Anchor_test.shape[0], 1))

    model.fit([Anchor, Positive, Negative],
              y=Y_dummy,
              validation_data=([Anchor_test, Positive_test, Negative_test],
                               Y_dummy2),
              batch_size=batch_size,
              epochs=epochs)

    print(Anchor.shape)
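
The compile call above uses triplet_loss, which is defined elsewhere in the project. The following is only a sketch of a compatible implementation, assuming the merged_layer output concatenates three equal-width embeddings (the 300-column Y_dummy suggests 100 dimensions each) and a standard additive margin; the margin value alpha is a guess:

from keras import backend as K


def triplet_loss(y_true, y_pred, alpha=0.4):
    # y_pred is the merged_layer output: [anchor | positive | negative],
    # three embeddings of equal width concatenated along the last axis.
    dim = K.int_shape(y_pred)[-1] // 3
    anchor = y_pred[:, :dim]
    positive = y_pred[:, dim:2 * dim]
    negative = y_pred[:, 2 * dim:]

    pos_dist = K.sum(K.square(anchor - positive), axis=1)
    neg_dist = K.sum(K.square(anchor - negative), axis=1)

    # Hinge: require negatives to sit at least alpha further from the
    # anchor than positives; the loss is zero once the margin is met.
    return K.maximum(pos_dist - neg_dist + alpha, 0.0)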
Code Example #4
# `model`, load_data and cut_letter are defined earlier in the project;
# this fragment evaluates a trained model on one 320-trace set.
print('Loading evaluation data')

data_to_train = load_data(
    '/data/wuchenxi/new_simeck_data/signal54400_circle/signal_320_circle/')

label_to_train = cut_letter(
    '/data/wuchenxi/new_simeck_data/signal54400_circle/new_simeck_320.txt', 0,
    0, 4 * 320)
label_to_train_lb = to_categorical(label_to_train, 16)

data_to_train = np.expand_dims(data_to_train, axis=2)

eval2 = model.evaluate(
    data_to_train,
    label_to_train_lb,
    verbose=1,
)
"""
all 1 key_1_1
320/320 [==============================] - 7s 23ms/step
acc: [0.034768438152968886, 1.0]
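
For completeness, cut_letter is the helper every example above uses to turn the key file into labels. Its implementation is not shown on this page; the following is only a guess at a compatible version, assuming the key file stores key_length hex characters on one line (here 4 * 320: four key letters for each of 320 traces) and that each label is one hex digit in [0, 15]:

def cut_letter(key_path, which_line, which_letter, key_length):
    # Read the requested line of hex key material.
    with open(key_path) as f:
        line = f.readlines()[which_line].strip()
    # One label per trace: the which_letter-th hex digit of each
    # 4-letter key, converted to an integer class in [0, 15].
    return [int(line[i], 16) for i in range(which_letter, key_length, 4)]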