Example #1
        out = tf.keras.layers.Dense(N_CLASSES, activation='sigmoid')(out)
        model = tf.keras.models.Model(inputs=model.input, outputs=out)

        if config.optimizer == 'adam':
            opt = Adam(config.lr)
        elif config.optimizer == 'sgd':
            opt = SGD(config.lr, momentum=0.9)
        else:
            opt = RMSprop(config.lr, momentum=0.9)

        if config.l2 > 0:
            model = apply_kernel_regularizer(
                model, tf.keras.regularizers.l2(config.l2))
        model.compile(optimizer=opt,
                      loss='binary_crossentropy',
                      metrics=['AUC'])
        model.summary()
        model.load_weights(NAME)
        print('loaded pretrained model')
        """ DATA """
        # wavs = glob.glob('/codes/2020_track3/t3_audio/*.wav')
        wavs = glob.glob(
            '/media/data1/datasets/ai_challenge/2020_track3/t3_audio/*.wav')
        wavs.sort()
        to_mel = magphase_to_mel(config.n_mels)

        for wav in wavs:
            sample = load_wav(wav)[None, :]  # [1, freq, time, chan2]
            sample = complex_to_magphase(sample)
            sample = to_mel(sample)
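
Every example in this listing calls a project helper, apply_kernel_regularizer, whose definition is not part of these excerpts. A minimal sketch of what it presumably does, assuming the common Keras pattern of rewriting each layer's kernel_regularizer and rebuilding the model so the added losses take effect (hypothetical code, not the project's):

import tensorflow as tf

def apply_kernel_regularizer(model, regularizer):
    # Attach the regularizer to every layer that has a kernel weight.
    for layer in model.layers:
        if hasattr(layer, 'kernel_regularizer'):
            layer.kernel_regularizer = regularizer
    # Keras only picks up the change when the model is rebuilt, so
    # round-trip through the JSON config and restore the weights.
    weights = model.get_weights()
    model = tf.keras.models.model_from_json(model.to_json())
    model.set_weights(weights)
    return model
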
Example #2
    if config.pretrain:
        model.load_weights(NAME)
        print('loaded pretrained model')

    if config.optimizer == 'adam':
        opt = Adam(config.lr, clipvalue=0.01)
    elif config.optimizer == 'sgd':
        opt = SGD(config.lr, momentum=0.9)
    else:
        opt = RMSprop(config.lr, momentum=0.9)

    if config.l2 > 0:
        model = apply_kernel_regularizer(model,
                                         tf.keras.regularizers.l2(config.l2))
    model.compile(optimizer=opt, loss=custom_loss, metrics=[d_total, cos_sim])
    model.summary()
    """ DATA """
    train_set = make_dataset(config, training=True)
    test_set = make_dataset(config, training=False)
    """ TRAINING """
    callbacks = [
        CSVLogger(NAME.replace('.h5', '.log'), append=True),
        LearningRateScheduler(custom_scheduler(4096, TOTAL_EPOCH / 12)),
        SWA(start_epoch=TOTAL_EPOCH // 2, swa_freq=2),
        ModelCheckpoint(NAME,
                        monitor='val_d_total',
                        save_best_only=True,
                        verbose=1),
        TerminateOnNaN()
    ]
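
Examples #2 and #4 wrap a project-specific custom_scheduler (imported from train_frame in Example #3) in LearningRateScheduler. Its definition is not shown; given the call custom_scheduler(4096, TOTAL_EPOCH / 12), one plausible reading is a Transformer-style "Noam" schedule evaluated per epoch, with the lr_div argument from Example #4 as an extra divisor. This is a guess, not the project's actual code:

def custom_scheduler(d_model, warmup_epochs, lr_div=1.0):
    # Noam schedule: linear warmup for `warmup_epochs` epochs, then
    # decay proportional to 1/sqrt(epoch), scaled by 1/sqrt(d_model).
    def schedule(epoch, lr=None):
        step = float(epoch) + 1.0
        return (d_model ** -0.5
                * min(step ** -0.5, step * warmup_epochs ** -1.5)
                / lr_div)
    return schedule
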
Example #3
    out = tf.keras.layers.ReLU()(out)
    out = tf.keras.layers.Dense(30, activation=None)(out)
    model = tf.keras.models.Model(inputs=model.input, outputs=out)

    if config.optimizer == 'adam':
        opt = Adam(config.lr, clipvalue=config.clipvalue)
    elif config.optimizer == 'sgd':
        opt = SGD(config.lr, momentum=0.9, clipvalue=config.clipvalue)
    else:
        opt = RMSprop(config.lr, momentum=0.9, clipvalue=config.clipvalue)

    if config.l2 > 0:
        model = apply_kernel_regularizer(model,
                                         tf.keras.regularizers.l2(config.l2))
    model.compile(
        optimizer=opt,
        loss=mse,  # 'binary_crossentropy',
        metrics=[d_total, baseline])
    model.summary()
    """ DATA """
    train_set = make_dataset(config, training=True)
    test_set = make_dataset(config, training=False)
    print(train_set)
    for x, y in train_set.take(1):
        print(tf.shape(x), tf.shape(y))
    """ TRAINING """
    from train_frame import custom_scheduler
    callbacks = [
        CSVLogger(NAME.replace('.h5', '.log'), append=True),
        # LearningRateScheduler(custom_scheduler(config.n_dim*8, config.epochs/10)),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=config.lr_factor,
Example #4
        model = tf.keras.models.Model(inputs=x, outputs=out)
        model.summary()

        lr = config.lr
        if config.optimizer == 'adam':
            opt = Adam(lr, clipvalue=config.clipvalue)
        elif config.optimizer == 'sgd':
            opt = SGD(lr, momentum=0.9, clipvalue=config.clipvalue)
        else:
            opt = RMSprop(lr, momentum=0.9, clipvalue=config.clipvalue)

        if config.l2 > 0:
            model = apply_kernel_regularizer(
                model, tf.keras.regularizers.l2(config.l2))
        model.compile(optimizer=opt,
                      loss=custom_loss(alpha=0.8, l2=config.loss_l2),
                      metrics=[d_total(config.multiplier), cos_sim])
        model.summary()

        if config.pretrain:
            model.load_weights(NAME)
            print('loaded pretrained model')
        """ DATA """
        train_set = make_dataset(config, training=True)
        test_set = make_dataset(config, training=False)
        """ TRAINING """
        callbacks = [
            CSVLogger(NAME.replace('.h5', '.log'), append=True),
            LearningRateScheduler(
                custom_scheduler(4096, TOTAL_EPOCH / 12, config.lr_div)),
            SWA(start_epoch=TOTAL_EPOCH // 2, swa_freq=2),
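
SWA here is a custom callback for stochastic weight averaging rather than a stock Keras class. A minimal sketch under the assumption that it averages the weights every swa_freq epochs from start_epoch onward and installs the average when training ends (the project's version may differ, e.g. by also re-estimating batch-norm statistics):

import tensorflow as tf

class SWA(tf.keras.callbacks.Callback):
    def __init__(self, start_epoch, swa_freq=1):
        super().__init__()
        self.start_epoch = start_epoch
        self.swa_freq = swa_freq
        self.swa_weights = None
        self.n_averaged = 0

    def on_epoch_end(self, epoch, logs=None):
        if epoch < self.start_epoch or (epoch - self.start_epoch) % self.swa_freq:
            return
        weights = self.model.get_weights()
        if self.swa_weights is None:
            self.swa_weights = weights
        else:
            # Running average over all snapshots collected so far.
            self.swa_weights = [
                (avg * self.n_averaged + w) / (self.n_averaged + 1)
                for avg, w in zip(self.swa_weights, weights)]
        self.n_averaged += 1

    def on_train_end(self, logs=None):
        if self.swa_weights is not None:
            self.model.set_weights(self.swa_weights)
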
Example #5
        config.lr = cosine_decay_with_warmup(
            config.steps_per_epoch * 5, config.lr,
            config.steps_per_epoch * config.steps)
        if config.optimizer == 'adam':
            opt = Adam(config.lr)
        elif config.optimizer == 'sgd':
            opt = SGD(config.lr, momentum=0.9)
        else:
            opt = RMSprop(config.lr, momentum=0.9)

        if config.l2 > 0:
            model = apply_kernel_regularizer(
                model, tf.keras.regularizers.l2(config.l2))
        model.compile(optimizer=opt,
                      loss='categorical_crossentropy',
                      metrics=['accuracy', 'AUC'])
        model.summary()

        if config.pretrain:
            model.load_weights(NAME)
            print('loaded pretrained model')
        """ DATA """
        # 1. IMPORTING TRAINING DATA & PRE-PROCESSING
        PATH = '/datasets/ai_challenge/icassp'
        prefixes = ['gen']  # , 'noise_train']
        x = [np.load(os.path.join(PATH, f'{pre}_x.npy')) for pre in prefixes]
        y = [np.load(os.path.join(PATH, f'{pre}_y.npy')) for pre in prefixes]
        test_x = [np.load(os.path.join(PATH, '50_x.npy'))]
        test_y = [np.load(os.path.join(PATH, '50_y.npy'))]
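
cosine_decay_with_warmup in Example #5 returns an object that is passed straight to the optimizer as its learning rate, so it is presumably a tf.keras LearningRateSchedule. A sketch under that assumption, with linear warmup followed by cosine decay; the argument order (warmup steps, peak LR, total steps) is taken from the call above:

import math
import tensorflow as tf

class CosineDecayWithWarmup(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, warmup_steps, peak_lr, total_steps):
        self.warmup_steps = float(warmup_steps)
        self.peak_lr = peak_lr
        self.total_steps = float(total_steps)

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        # Ramp linearly up to peak_lr, then follow a half cosine to zero.
        warmup_lr = self.peak_lr * step / self.warmup_steps
        progress = ((step - self.warmup_steps)
                    / tf.maximum(1.0, self.total_steps - self.warmup_steps))
        cosine_lr = 0.5 * self.peak_lr * (1.0 + tf.cos(math.pi * progress))
        return tf.where(step < self.warmup_steps, warmup_lr, cosine_lr)

def cosine_decay_with_warmup(warmup_steps, peak_lr, total_steps):
    return CosineDecayWithWarmup(warmup_steps, peak_lr, total_steps)
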
Example #6
        # CosineDecay's `alpha` is a fraction of the initial LR, not an
        # absolute value, so pass a plain ratio (decay to 1% of config.lr).
        config.lr = tf.keras.experimental.CosineDecay(
            config.lr,
            config.epochs * config.steps_per_epoch,
            alpha=1e-2)
        if config.optimizer == 'adam':
            opt = Adam(config.lr)
        elif config.optimizer == 'sgd':
            opt = SGD(config.lr, momentum=0.9)
        else:
            opt = RMSprop(config.lr, momentum=0.9)

        if config.l2 > 0:
            model = apply_kernel_regularizer(
                model, tf.keras.regularizers.l2(config.l2))
        model.compile(optimizer=opt,
                      loss='categorical_crossentropy',
                      metrics=['accuracy', 'AUC'])
        # model.summary()

        if config.pretrain:
            model.load_weights(NAME)
            print('loaded pretrained model')
        """ DATA """
        # TRAINING DATA
        PATH = '/codes/generate_wavs'
        backgrounds = pickle.load(
            open(os.path.join(PATH, 'drone_normed_complex.pickle'), 'rb'))
        voices = pickle.load(
            open(os.path.join(PATH, 'voice_normed_complex.pickle'), 'rb'))
        labels = np.load(os.path.join(PATH, 'voice_labels.npy'))
Example #7
# Imports needed by this snippet (the loading of x_train / x_test is
# not shown in this excerpt).
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.applications import EfficientNetB0

y_test = np.loadtxt('D:/study/data/test_label.txt')
x_test = x_test[:5138, :, :, :]
print(x_train.shape)
print(y_train.shape)

print(x_test.shape)
print(y_test.shape)
print('done')

# 2. Model
model = Sequential()
model.add(EfficientNetB0(include_top=False))
# include_top=False leaves a 4-D feature map, so pool it down to a
# vector before the classification head.
model.add(GlobalAveragePooling2D())
model.add(Dense(20, activation='softmax', name='s1'))
model.summary()
# 3. Training
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.fit(x_train, y_train, batch_size=12, epochs=2, validation_split=0.1)


# 4. Evaluation, prediction

loss, acc = model.evaluate(x_train, y_train)

y_predict = model.predict(x_test)
print("loss : ", loss)
print("acc : ", acc)

print(y_predict)
print('really done')
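
Note that the evaluate call above re-scores the training data. Scoring the held-out split loaded at the top of the script uses the same call with the test arrays:

loss, acc = model.evaluate(x_test, y_test)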