def main(args):
    """Train a small 2-D CNN classifier on pre-saved .npy arrays and save it.

    Expects ``args`` (after ``parse_train_args``) to carry paths to the
    train/validation feature and label arrays plus ``model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path

    example_shape = X_train.shape[1:]
    # Derive the class count from the labels instead of hard-coding 6,
    # consistent with the other training variants in this file.
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=example_shape)
    conv_1 = Conv2D(filters=40, kernel_size=3, padding='same', activation='relu')(input_layer)
    pool_1 = MaxPool2D(pool_size=(2, 1))(conv_1)
    conv_2 = Conv2D(filters=20, kernel_size=3, padding='same', activation='relu')(pool_1)
    flatten = Flatten()(conv_2)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 100
    # Stop when validation loss has not improved for 3 consecutive epochs.
    callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Train a 3-stage 1-D CNN with L2 regularization, LR decay and TensorBoard.

    Loads train/validation arrays from the .npy paths in ``args``, trains with
    early stopping and a step learning-rate schedule, logs to the TensorBoard
    directory from ``args``, and saves the model to ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path
    tensorboard_output_dir = args.tensorboard_output_dir

    def lr_schedule(epoch, lr):
        # After epoch 50, decay the learning rate by 5% every 10th epoch;
        # otherwise keep it unchanged.
        if epoch > 50:
            if epoch % 10 == 0:
                return lr * 0.95
        return lr

    lr_callback = LearningRateScheduler(lr_schedule)
    callbacks = [lr_callback,
                 EarlyStopping(monitor='val_loss', patience=3),
                 TensorBoard(log_dir=tensorboard_output_dir,
                             write_images=True,
                             write_grads=True,
                             histogram_freq=5,
                             batch_size=10000)]

    input_shape = X_train.shape[1:]
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv1D(filters=16, kernel_size=4, padding='same', activation='selu',
                    kernel_regularizer=l2(1e-3))(input_layer)
    pool_1 = MaxPooling1D(pool_size=(5), strides=1)(conv_1)
    conv_2 = Conv1D(filters=32, kernel_size=4, padding='same', activation='selu',
                    kernel_regularizer=l2(1e-3))(pool_1)
    pool_2 = MaxPooling1D(pool_size=(4), strides=1)(conv_2)
    conv_3 = Conv1D(filters=48, kernel_size=4, padding='same', activation='selu',
                    kernel_regularizer=l2(1e-3))(pool_2)
    pool_3 = MaxPooling1D(pool_size=(3), strides=1)(conv_3)
    flatten = Flatten()(pool_3)
    dn_1 = Dense(336, activation='selu')(flatten)
    drop = Dropout(0.5)(dn_1)
    predictions = Dense(num_output_classes, activation='softmax')(drop)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 150
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Train a 1-D CNN (regular + separable conv) with LR decay and save it.

    Loads the train/validation .npy arrays named in ``args``, trains with
    early stopping and a step learning-rate schedule, and saves the model to
    ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_valid = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_valid = np.load(args.y_validate)
    model_save_path = args.model_save_path

    def lr_schedule(epoch, lr):
        # After epoch 50, decay the learning rate by 5% every 5th epoch;
        # otherwise keep it unchanged.
        if epoch > 50:
            if epoch % 5 == 0:
                return lr * 0.95
        return lr

    lr_callback = LearningRateScheduler(lr_schedule)
    callbacks = [lr_callback, EarlyStopping(monitor='val_loss', patience=3)]

    input_shape = X_train.shape[1:]
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu',
                    kernel_regularizer=l2(0.01))(input_layer)
    pool_1 = MaxPooling1D(pool_size=(2))(conv_1)
    conv_2 = SeparableConv1D(filters=40, kernel_size=3, padding='same', activation='relu',
                             kernel_regularizer=l2(0.01))(pool_1)
    bn_1 = BatchNormalization()(conv_2)
    flatten = Flatten()(bn_1)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 150
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_valid, y_valid),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Train a two-layer 2-D CNN with batch norm and LR decay, then save it.

    Loads the train/validation .npy arrays named in ``args``, trains with
    early stopping and a step learning-rate schedule, and saves the model to
    ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path

    def lr_schedule(epoch, lr):
        # After epoch 50, decay the learning rate by 5% every 10th epoch;
        # otherwise keep it unchanged.
        if epoch > 50:
            if epoch % 10 == 0:
                return lr * 0.95
        return lr

    lr_callback = LearningRateScheduler(lr_schedule)
    callbacks = [lr_callback, EarlyStopping(monitor='val_loss', patience=3)]

    example_shape = X_train.shape[1:]
    # Derive the class count from the labels instead of hard-coding 6,
    # consistent with the other training variants in this file.
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=example_shape)
    conv_1 = Conv2D(filters=20, kernel_size=3, padding='same', activation='relu')(input_layer)
    conv_2 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(conv_1)
    bn_1 = BatchNormalization()(conv_2)
    flatten = Flatten()(bn_1)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 150
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Train a 2-D CNN with a wide dense head, log to ./logs, and save it.

    Loads the train/validation .npy arrays named in ``args``, trains with
    early stopping plus TensorBoard logging, and saves the model to
    ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path

    input_shape = X_train.shape[1:]
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv2D(filters=16, kernel_size=3, padding='same', activation='selu')(input_layer)
    pool_1 = MaxPooling2D(pool_size=(2, 1))(conv_1)
    conv_2 = Conv2D(filters=32, kernel_size=3, padding='same', activation='selu')(pool_1)
    flatten = Flatten()(conv_2)
    # NOTE(review): 1042 looks like a possible typo for 1024 — confirm with the
    # author before changing; kept as-is to preserve behavior.
    dense_1 = Dense(1042, activation='selu')(flatten)
    dropout_1 = Dropout(0.25)(dense_1)
    predictions = Dense(num_output_classes, activation='softmax')(dropout_1)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 50
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=3),
        TensorBoard(log_dir='./logs', write_images=True, histogram_freq=0)
    ]
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Train a 3-layer 1-D CNN with LR decay and TensorBoard logging, then save it.

    Loads the train/validation .npy arrays named in ``args``, logs to the
    TensorBoard directory from ``args``, and saves the model to
    ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path
    tensorboard_output_dir = args.tensorboard_output_dir

    def lr_schedule(epoch, lr):
        # After epoch 50, decay the learning rate by 5% every 10th epoch;
        # otherwise keep it unchanged.
        if epoch > 50:
            if epoch % 10 == 0:
                return lr * 0.95
        return lr

    lr_callback = LearningRateScheduler(lr_schedule)
    # NOTE(review): unlike the other variants in this file, there is no
    # EarlyStopping here — the run always lasts the full epoch budget.
    # Confirm this is intentional.
    callbacks = [lr_callback,
                 TensorBoard(log_dir=tensorboard_output_dir,
                             write_images=True,
                             write_grads=True,
                             histogram_freq=5)]

    input_shape = X_train.shape[1:]
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu')(input_layer)
    pool_1 = MaxPooling1D(pool_size=(2))(conv_1)
    conv_2 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu')(pool_1)
    pool_2 = MaxPooling1D(pool_size=(2))(conv_2)
    conv_3 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu')(pool_2)
    bn_1 = BatchNormalization()(conv_3)
    flatten = Flatten()(bn_1)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 50
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)
def main(args):
    """Load the datasets into module globals and hand off to the optimizer.

    ``run_optimizer`` (defined elsewhere in this module) reads the arrays and
    the TensorBoard directory through these globals rather than parameters.
    """
    args = parse_train_args(args)

    # run_optimizer() picks these up as module-level state.
    global X_train, X_validate, y_train, y_validate, tensorboard_output_dir

    X_train = np.load(args.X_train)
    y_train = np.load(args.y_train)
    X_validate = np.load(args.X_validate)
    y_validate = np.load(args.y_validate)

    model_save_path = args.model_save_path
    tensorboard_output_dir = args.tensorboard_output_dir

    run_optimizer()
def main(args):
    """Train a two-layer 2-D CNN with batch norm and dropout, then save it.

    Loads the train/validation .npy arrays named in ``args``, trains with
    early stopping, and saves the model to ``args.model_save_path``.
    """
    args = parse_train_args(args)

    X_train = np.load(args.X_train)
    X_validate = np.load(args.X_validate)
    y_train = np.load(args.y_train)
    y_validate = np.load(args.y_validate)
    model_save_path = args.model_save_path

    input_shape = X_train.shape[1:]
    # assumes y_train is one-hot encoded (categorical_crossentropy) — TODO confirm
    num_output_classes = y_train.shape[1]

    input_layer = Input(shape=input_shape)
    conv_1 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(input_layer)
    bn_1 = BatchNormalization()(conv_1)
    conv_2 = Conv2D(filters=10, kernel_size=3, padding='same', activation='relu')(bn_1)
    drop_1 = Dropout(0.25)(conv_2)
    flatten = Flatten()(drop_1)
    predictions = Dense(num_output_classes, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summary() prints the table itself and returns None; wrapping it in
    # print() would emit a spurious "None" line.
    model.summary()

    batch_size = 10000
    epochs = 100
    callbacks = [EarlyStopping(monitor='val_loss', patience=3)]
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate),
              callbacks=callbacks)
    model.save(model_save_path)