Example #1
def train():
    X = np.load('./data/train_X.npy')
    y = np.load('./data/train_y.npy')

    train_datagen = ImageDataGenerator(rescale=1.0 / 255.)
    validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
    train_generator = train_datagen.flow(X, y, batch_size=300)
    validation_generator = validation_datagen.flow(X, y)

    nb_epoch = 2000
    nb_train_samples = N
    nb_validation_samples = 600
    old_session = KTF.get_session()
    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)
        model = Sequential()
        with tf.name_scope("inference") as scope:
            model = set_model(model)
        model.summary()
        fpath = './model/weights.hdf5'
        tb_cb = TensorBoard(log_dir="./tensorlog", histogram_freq=1)
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=False,
                                mode='auto',
                                save_weights_only=True)
        history = model.fit_generator(train_generator,
                                      samples_per_epoch=nb_train_samples,
                                      nb_epoch=nb_epoch,
                                      validation_data=validation_generator,
                                      nb_val_samples=nb_validation_samples,
                                      callbacks=[cp_cb, tb_cb])
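The set_model() helper called in train() is not part of this snippet. Below is a hypothetical stand-in: the layer sizes and the (64, 64, 3) input shape are assumptions, and a compile() call is included because train() itself never compiles the model before fit_generator.

# Hypothetical stand-in for set_model(); the architecture and input shape are
# assumptions, not the original network.
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D

def set_model(model):
    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    # Compile here so fit_generator() in train() can run.
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model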
Example #2
    def _wrap_savedmodel_around_keras(self, preprocess_function,
                                      postprocess_function, model_name,
                                      model_version, h5_filepath, serve_dir,
                                      channels, optional_preprocess_args,
                                      optional_postprocess_args):
        """ Injects input and output layers with Keras Lambdas, then
            exports to SavedModel.

            Args:
                preprocess_function: A function from the LayerInjector class
                    which preprocesses input.
                postprocess_function: A function from the LayerInjector class
                    which postprocesses output.
                model_name: The name of the model.
                model_version: The version number of the model.
                h5_filepath: The filepath to a .h5 file containing an
                    exported Keras model.
                serve_dir: The path to the model's serve directory.
                channels: The number of channels of the input image.
                optional_preprocess_args: Optional dict of arguments for use
                    with custom preprocess functions.
                optional_postprocess_args: Optional dict of arguments for use
                    with custom postprocess functions.
        """

        # Parses paths
        # Note that the serve directory MUST have a model version subdirectory
        model_version = str(model_version)
        save_path = serve_dir + "/" + model_name + "/" + model_version

        # Instantiates a Keras model
        K.set_learning_phase(0)
        keras_model = load_model(h5_filepath)

        # Instantiates placeholder for image bitstring
        input_bytes = Input(shape=[], dtype=tf.string)

        # Preprocesses image bitstring
        pre_map = {"channels": channels, **optional_preprocess_args}
        input_tensor = Lambda(preprocess_function,
                              arguments=pre_map)(input_bytes)

        # Gets output tensor(s)
        output_tensor = keras_model(input_tensor)

        # Postprocesses output tensor(s)
        post_map = optional_postprocess_args
        output_bytes = Lambda(postprocess_function,
                              arguments=post_map)(output_tensor)

        # Builds new Model
        model = Model(input_bytes, output_bytes)

        # Builds input/output tensor protos
        input_tensor_info = {"input_bytes": build_tensor_info(model.input)}
        output_tensor_info = {"output_bytes": build_tensor_info(model.output)}

        # Creates and saves SavedModel
        self._create_savedmodel(save_path, input_tensor_info,
                                output_tensor_info)
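The _create_savedmodel() helper called at the end is not shown. A minimal sketch of such an export step, assuming the TF 1.x SavedModel builder API (not the author's actual implementation), could look like this:

# Sketch of a _create_savedmodel-style export helper (an assumption, not the
# original code): wraps the prepared tensor infos into a serving signature and
# writes a SavedModel for TensorFlow Serving.
import tensorflow as tf
from keras import backend as K

def _create_savedmodel(save_path, input_tensor_info, output_tensor_info):
    # Build a prediction signature from the TensorInfo protos prepared above
    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=input_tensor_info,
        outputs=output_tensor_info,
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

    # Export the current Keras session graph and variables
    builder = tf.saved_model.builder.SavedModelBuilder(save_path)
    builder.add_meta_graph_and_variables(
        K.get_session(),
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature
        })
    builder.save()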
Example #3
def encode_and_decode(mode_auto, exp_condition):
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=config)
    keras.backend.set_session(sess)

    old_session = KTF.get_session()

    session = tf.compat.v1.Session('')
    KTF.set_session(session)
    KTF.set_learning_phase(1)

    loader_ins = Loader(exp_condition["load_dir"])
    loader_ins.load(gray=True, size=(196, 136))  # width × height
    data = loader_ins.get_data(norm=True)  # (None, Height, Width)

    if mode_auto == "CAE":
        input_shape = (data.shape[1], data.shape[2], 1)
        data = np.reshape(data, (data.shape[0], data.shape[1], data.shape[2], 1))  # (None, Height, Width, 1)
    elif mode_auto == "AE":
        input_shape = (data.shape[1]*data.shape[2],)
        data = np.reshape(data, (data.shape[0], data.shape[1]*data.shape[2],))  # (None, Height*Width)
    else:
        raise ValueError("mode_auto must be 'CAE' or 'AE'")

    x_train = data[:int(len(data) * exp_condition["train_rate"])]
    x_val = data[int(len(data) * exp_condition["train_rate"]):]

    train_auto(mode_auto=mode_auto,
               x_train=x_train,
               x_val=x_val,
               input_shape=input_shape,
               weights_dir=exp_condition["weights_dir"],
               batch_size=exp_condition["batch_size"],
               verbose=1,
               epochs=exp_condition["epochs"],
               num_compare=2
               )

    data = loader_ins.get_data(norm=True)
    model_name = get_latest_modified_file_path(exp_condition["weights_dir"])
    print(model_name, "will be used as the model to compute distributed representations.")
    img2vec(data, model_name, mode_auto=mode_auto, mode_out="hwf")

    KTF.set_session(old_session)
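get_latest_modified_file_path() used above is not included in the snippet. A plausible helper that returns the most recently modified file in a directory (an assumption, not the original) could be:

# Hypothetical helper matching the call above: return the most recently
# modified file in a directory.
import glob
import os

def get_latest_modified_file_path(dir_path):
    paths = glob.glob(os.path.join(dir_path, "*"))
    return max(paths, key=os.path.getmtime)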
Example #4
def main(_):
    (x_train, y_train), (x_test, y_test) = load_data()
    print(x_train.shape)
    print(x_test.shape)
    x_train = x_train.astype('float32').reshape(-1, 784) / 255.
    x_test = x_test.astype('float32').reshape(-1, 784) / 255.
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    print(x_train.shape)
    print(x_test.shape)

    with tf.Graph().as_default() as graph:
        with tf.Session() as sess:
            K.set_session(sess)
            K.set_learning_phase(1)
            model = Mnist()
            pprint(get_train_ops(graph=model.graph))
Example #5
def model_fn(input_dim, labels_dim, hidden_units, learning_rate=0.1):
    """Create a Keras Sequential model with layers.

  Args:
    input_dim: (int) Input dimensions for input layer.
    labels_dim: (int) Label dimensions for input layer.
    hidden_units: (str) Comma-separated layer sizes of the DNN (input layer first).
    learning_rate: (float) the learning rate for the optimizer.

  Returns:
    A Keras model.
  """

    # "set_learning_phase" to False to avoid:
    # AbortionError(code=StatusCode.INVALID_ARGUMENT) during online prediction.
    K.set_learning_phase(False)
    model = models.Sequential()
    hidden_units = [int(units) for units in hidden_units.split(',')]

    for units in hidden_units:
        model.add(
            layers.Dense(
                units,
                activation=relu,
                input_shape=[input_dim],
                kernel_initializer='glorot_uniform',
            ))
        model.add(layers.Dropout(0.5))
        model.add(
            layers.BatchNormalization(epsilon=1e-03,
                                      momentum=0.9,
                                      weights=None))
        input_dim = units
        #                 activity_regularizer=tf.keras.regularizers.l1(0.01)

    # Add a dense final layer with softmax activation.
    model.add(layers.Dense(labels_dim, activation='softmax'))
    compile_model(model, learning_rate)
    return model
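The compile_model() helper referenced above is not shown in this example. A minimal sketch, assuming a categorical cross-entropy loss and an SGD optimizer driven by learning_rate (both assumptions), could be:

# Hypothetical stand-in for compile_model(); loss and optimizer choice are
# assumptions, not the original implementation.
from keras import optimizers

def compile_model(model, learning_rate):
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=learning_rate),
                  metrics=['accuracy'])
    return model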
Example #6
def save_model(model):
    # load tensorflow and keras backend
    import os
    import tensorflow as tf
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    import keras.backend.tensorflow_backend as K

    ksess = K.get_session()
    print(ksess)

    # transform keras model to tensorflow graph
    # the output will be json-like format
    K.set_learning_phase(0)
    graph = ksess.graph
    kgraph = graph.as_graph_def()
    print(kgraph)

    # define output node
    num_output = 1
    prefix = "output"
    pred = [None]*num_output
    outputName = [None]*num_output
    for i in range(num_output):
        outputName[i] = prefix + str(i)
        pred[i] = tf.identity(model.get_output_at(i), name=outputName[i])
    print('output name: ', outputName)

    # convert variables in the model graph to constants
    constant_graph = graph_util.convert_variables_to_constants(ksess, ksess.graph.as_graph_def(), outputName)

    # save the model in .pb and .txt
    output_dir = "/home/"
    output_graph_name = "keras2tf.pb"
    output_text_name = "keras2tf.txt"
    graph_io.write_graph(constant_graph, output_dir, output_graph_name, as_text=False)
    graph_io.write_graph(constant_graph, output_dir, output_text_name, as_text=True)
    print('saved graph .pb at: {0}\nsaved graph .txt at: {1}'.format(
            os.path.join(output_dir, output_graph_name),
            os.path.join(output_dir, output_text_name)))
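Not part of the original example: once save_model() has written the frozen .pb graph, it can be loaded back for inference in TF 1.x roughly as follows. The tensor names are assumptions; "output0:0" follows the naming prefix used above, while the input name must be looked up in the actual graph.

# Minimal sketch (not in the original) for loading the frozen graph written by
# save_model() and running inference on it.
import tensorflow as tf

def load_frozen_graph(pb_path="/home/keras2tf.pb"):
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="")
    return graph

# graph = load_frozen_graph()
# with tf.Session(graph=graph) as sess:
#     out = sess.run("output0:0", feed_dict={"input_1:0": batch})  # tensor names are assumptions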
Example #7
def model_fn(input_dim,
             labels_dim,
             hidden_units,
             learning_rate=0.1):

  K.set_learning_phase(False)
  model = models.Sequential()
  hidden_units = [int(units) for units in hidden_units.split(',')]

  for units in hidden_units:
      model.add(layers.Dense(units, activation=relu, input_shape=[input_dim],
                            kernel_initializer='glorot_uniform',
                            ))
      # model.add(layers.Dropout(0.5))
      # model.add(layers.BatchNormalization(epsilon=1e-03, momentum=0.9, weights=None))
      input_dim = units
      #                 activity_regularizer=tf.keras.regularizers.l1(0.01)


  # Add a dense final layer with softmax activation.
  model.add(layers.Dense(labels_dim, activation='softmax'))
  # compile_model(model, learning_rate)
  return model
Example #8
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        fpath = './pretrained_mask/LIP_PSPNet50_mask{epoch:02d}.hdf5'
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=5)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_mask", write_images=True)

        # set generator
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        print(pspnet.summary())

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.epoch_steps,
                             epochs=args.n_epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, es_cb, tb_cb])

    # save model
    with open("./pretrained_mask/LIP_SegUNet_mask.json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))
    print("save json model done...")
Example #9
def Network_config(class_num=4,
                   epoch=200,
                   initial_epoch=0,
                   batch_size=32,
                   train_data=None,
                   train_label=None,
                   test_data=None,
                   test_label=None,
                   fold=0):
    adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.000)
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)
    K.set_learning_phase(1)
    base_model = VGG16(input_tensor=Input(shape=(224, 224, 3)),
                       weights='imagenet',
                       include_top=False)

    x = base_model.output
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    predictions = Dense(class_num, activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in (base_model.layers):
        layer.trainable = False
        if layer.name.startswith('bn'):
            layer.call(layer.input, training=False)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.categorical_accuracy])

    tools.create_directory('./tmpvgg/')
    weights_file = './tmpvgg/' + str(
        fold
    ) + '-weights.{epoch:02d}-{categorical_accuracy:.4f}-{val_loss:.4f}-{val_categorical_accuracy:.4f}.h5'
    csv_file = './tmpvgg/record.csv'
    lr_reducer = ReduceLROnPlateau(monitor='categorical_accuracy',
                                   factor=0.5,
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)
    early_stopper = EarlyStopping(monitor='val_categorical_accuracy',
                                  min_delta=1e-4,
                                  patience=50)

    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor='val_categorical_accuracy',
                                       save_best_only=True,
                                       verbose=2,
                                       save_weights_only=True,
                                       mode='max')
    tensorboard = TensorBoard(log_dir='./logs/',
                              histogram_freq=0,
                              batch_size=8,
                              write_graph=True,
                              write_grads=True,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    CSV_record = CSVLogger(csv_file, separator=',', append=True)

    callbacks = [
        lr_reducer, early_stopper, model_checkpoint, tensorboard, CSV_record
    ]
    gc.disable()
    model.fit_generator(
        generator=tools.batch_generator(np.array(train_data),
                                        np.array(train_label), batch_size,
                                        True, class_num),
        steps_per_epoch=int(len(train_label) / batch_size) - 1,
        max_q_size=50,
        initial_epoch=initial_epoch,
        epochs=epoch,
        verbose=1,
        callbacks=callbacks,
        validation_data=tools.batch_generator(np.array(test_data),
                                              np.array(test_label), batch_size,
                                              True, class_num),
        validation_steps=int(len(test_label) / batch_size) - 1,
        class_weight='auto')

    all_y_pred = []
    all_y_true = []
    for test_data_batch, test_label_batch in tools.batch_generator_confusion_matrix(
            np.array(test_data), np.array(test_label), batch_size, True,
            class_num):
        y_pred = model.predict(test_data_batch, batch_size)
        y_true = test_label_batch
        for y_p in y_pred:
            all_y_pred.append(np.where(y_p == max(y_p))[0][0])

        for y_t in y_true:
            all_y_true.append(np.where(y_t == max(y_t))[0][0])
    confusion = confusion_matrix(y_true=all_y_true, y_pred=all_y_pred)
    print(confusion)
    with open('confusion_matrix.txt', 'a+') as f:
        f.write(str(all_y_true) + "\n")
        f.write(str(all_y_pred) + "\n")
        f.write(str(confusion) + '\n')
    gc.enable()
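Not in the original: once the confusion matrix has been computed as above, per-class precision and recall can be derived directly from it. With scikit-learn's convention (rows are true classes, columns are predictions):

# Derive per-class precision and recall from the confusion matrix produced
# above (rows = true labels, columns = predictions).
import numpy as np

def per_class_metrics(confusion):
    confusion = np.asarray(confusion, dtype=np.float64)
    true_positives = np.diag(confusion)
    precision = true_positives / np.maximum(confusion.sum(axis=0), 1e-12)
    recall = true_positives / np.maximum(confusion.sum(axis=1), 1e-12)
    return precision, recall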
Example #10
def learning_star(data, label):

    X_train, X_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=29)

    """
    Add TensorBoard
    """
    old_session = KTF.get_session()
    session = tf.Session('')
    KTF.set_session(session)
    KTF.set_learning_phase(1)

    """
    Define the model
    """
    model = Sequential()

    model.add(Conv2D(30, 3, input_shape=(data.shape[1], data.shape[2], data.shape[3])))
    model.add(Activation('relu'))
    model.add(Conv2D(30, 3))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    model.add(Conv2D(30, 3))
    model.add(Activation('relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=1e-4)

    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=["accuracy"])

    """
    Add callbacks (TensorBoard, EarlyStopping)
    """
    tb_cb = keras.callbacks.TensorBoard('./logs/', histogram_freq=1)
    early_stopping = EarlyStopping(monitor='val_loss', mode='min', patience=20)
    cbks = [tb_cb, early_stopping]

    """
    Display Model Summary
    """
    plot_model(model, to_file='./model.png')


    """
    Learning Flow
    """
    history = model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epoch, verbose=1, validation_split=0.1, callbacks=cbks)
    score = model.evaluate(X_test, y_test, verbose=0)
    print('Test Score:', score[0])
    print('Test accuracy:', score[1])

    """
    Save Model
    """
    print('save the architecture of the model')
    json_string = model.to_json()
    open(os.path.join('./', 'cnn_model.json'), 'w').write(json_string)
    yaml_string = model.to_yaml()
    open(os.path.join('./', 'cnn_model.yaml'), 'w').write(yaml_string)

    print('save the weights')
    model.save_weights(os.path.join('./', 'cnn_model_weight.hdf5'))

    """
Example #11
def main():
    parser = train_parser()
    args = parser.parse_args()

    if args.use_cpu:
        # disable gpu completely
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    else:
        # set device number
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if args.seed != -1:
        # set random seed
        np.random.seed(args.seed)
        random.seed(args.seed)
        tf.set_random_seed(args.seed)

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # determine some variables from args
        dims, input_shape, n_labels, model_dir, checkpoint_path = find_input(
            args)

        os.makedirs(model_dir, exist_ok=True)
        # set callbacks
        # for multiple checkpoints set filepath to "...point{epoch:d}.hdf5"
        cp_cb = MyCheckpoint(filepath=model_dir + "/checkpoint.hdf5",
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=False,
                             mode='auto',
                             period=1)
        tb_cb = TensorBoard(log_dir=model_dir, write_images=True)
        log = CSVLogger(model_dir + "/training.log", append=True)

        # set generator
        if args.use_random:
            # create random data
            train_gen = data_gen_random(dims, n_labels, args.batch_size,
                                        args.use_flow)
            val_gen = data_gen_random(dims, n_labels, args.batch_size,
                                      args.use_flow)
        elif args.use_numpy:
            # load small numpy dataset
            n_labels = args.n_classes
            datagen = DataStorageNpy(args.train_imgs_npy, args.train_mask_npy,
                                     args.train_flow_npy, args.val_percent,
                                     dims, n_labels, args.use_flow,
                                     args.batch_size)

            train_gen = datagen.generate(training=True)
            val_gen = datagen.generate(training=False)
        else:
            # load dataset from folders
            datagen = DataStorage(args.data_dir, args.dataset, dims, n_labels)
            train_gen = datagen.generate(
                args.batch_size,
                args.use_flow,
                args.use_mb,
                args.use_occ,
                args.train_categories,
                split="train",
                shuffle=True,
                normalize_images=not args.no_norm_images)
            val_gen = datagen.generate(
                args.batch_size,
                args.use_flow,
                args.use_mb,
                args.use_occ,
                args.train_categories,
                split="val",
                shuffle=False,
                random_crop=False,
                normalize_images=not args.no_norm_images)
            if args.test_generator:
                imgs, labels = next(train_gen)
                print("datagen yielded", imgs.shape, labels.shape)
                return
            # # test has flow and images, but masks are zero
            # # so it cannot be used to compute accuracy
            # test_gen = datagen.generate(args.batch_size, args.use_flow,
            #                             args.use_mb, args.use_occ,
            #                             split="test")

        # ----- determine class weights
        weight_mask, weight_mask_iou = find_masks(args, n_labels)
        print("using weight mask:", weight_mask)
        print("using weight mask for iou:", weight_mask_iou)

        # ----- define loss
        old_loss = args.loss
        print("old loss", old_loss)
        loss = weighted_categorical_crossentropy(weight_mask, args.weight_mult)

        # ----- define iou metric
        def mean_iou(y_true, y_pred):
            return mean_iou_func(y_true,
                                 y_pred,
                                 args.batch_size,
                                 dims,
                                 n_labels,
                                 weight_mask_iou,
                                 iou_test=False)

        # ----- restart or load model
        restart = args.restart
        if not args.restart:
            # check if model is available
            if not os.path.exists(checkpoint_path):
                print("no previous model available, restarting from epoch 0")
                restart = True

        print("model input shape", input_shape)
        if restart:
            # set model
            pspnet = PSPNet50(input_shape=input_shape,
                              n_labels=n_labels,
                              output_mode=args.output_mode,
                              upsample_type=args.upsample_type)

            # compile model
            pspnet.compile(loss=loss,
                           optimizer=args.optimizer,
                           metrics=["accuracy", mean_iou])
            # metrics=["acc_iou", mean_iou])
            starting_epoch = 0

        else:
            # load model from dir
            try:
                pspnet = load_model(checkpoint_path,
                                    custom_objects={
                                        'mean_iou': mean_iou,
                                        'loss': loss
                                    })
            except OSError:
                raise OSError("failed loading checkpoint", checkpoint_path)
            except ValueError:
                raise ValueError(
                    "possible the checkpoint file is corrupt because the "
                    "script died while saving. use the backup old_checkpoint")
            try:
                with open(model_dir + "/epochs.txt", "r") as fh:
                    starting_epoch = int(fh.read())
            except OSError:
                raise FileNotFoundError("could not find epochs.txt")
            print("reloading model epoch", starting_epoch)

        # print model summary (weights, layers etc)
        if args.summary:
            print(pspnet.summary())

        # ----- fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.epoch_steps,
                             epochs=args.n_epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, tb_cb, log],
                             initial_epoch=starting_epoch)
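The weighted_categorical_crossentropy() factory used above is not included in the snippet. A common formulation (a sketch under that assumption, not the original) multiplies the per-class cross-entropy terms by a weight vector:

# Sketch of a weighted categorical cross-entropy factory in the spirit of the
# call above; weight_mask is a per-class weight vector, weight_mult a global
# multiplier. Not the original implementation.
import numpy as np
from keras import backend as K

def weighted_categorical_crossentropy(weight_mask, weight_mult=1.0):
    weights = K.constant(np.asarray(weight_mask, dtype='float32') * weight_mult)

    def loss(y_true, y_pred):
        # Normalise, clip, then weight the standard cross-entropy per class.
        y_pred = y_pred / K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        cross_entropy = y_true * K.log(y_pred) * weights
        return -K.sum(cross_entropy, axis=-1)

    return loss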
Example #12
    output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
    # print("target shape=", target.shape)
    wei = target[:, :, :, :, -1:]
    target = target[:, :, :, :, :-1]
    # print("target shape=", target.shape, ", wei shape=", wei.shape, "output shape ", output.shape)
    output = output[:, :, :, :, :-1]
    output = math_ops.log(output / (1 - output))
    retval = nn.weighted_cross_entropy_with_logits(
        targets=target, logits=output,
        pos_weight=10)  #900=works #2900=200x200, 125=30x30
    retval = retval * (wei + 0.01)
    # print("output xentr ", retval)
    return tf.reduce_sum(retval, axis=None) / (tf.reduce_sum(wei, axis=None))


K.set_learning_phase(0)
if args.theano_backend:
    K.set_image_data_format('channels_first')
else:
    K.set_image_data_format('channels_last')

try:
    net_model = load_model(weight_file_path,
                           custom_objects={
                               'loss_mse_select_clipped':
                               loss_mse_select_clipped,
                               'loss_ROIsoft_crossentropy':
                               loss_ROI_crossentropy,
                               '_to_tensor': _to_tensor
                           })
except ValueError as err:
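The loss above is only a fragment: its def line and the _to_tensor/epsilon_ helpers are not shown. A self-contained sketch of the same idea, in which the last channel of the target tensor carries per-pixel weights, might look like the following; the names and the pos_weight value come from the fragment, everything else is an assumption.

# Self-contained sketch of a weight-channel cross-entropy loss in the spirit
# of the fragment above; not the original function.
import tensorflow as tf
from keras import backend as K

def weighted_xentropy_with_weight_channel(target, output, pos_weight=10.0):
    # Keep probabilities away from 0/1 before converting back to logits.
    epsilon_ = K.epsilon()
    output = tf.clip_by_value(output, epsilon_, 1 - epsilon_)

    # The last channel of the target holds per-pixel weights.
    wei = target[..., -1:]
    target = target[..., :-1]
    output = output[..., :-1]

    # Convert probabilities to logits, then apply weighted cross-entropy.
    logits = tf.log(output / (1 - output))
    xent = tf.nn.weighted_cross_entropy_with_logits(
        targets=target, logits=logits, pos_weight=pos_weight)
    xent = xent * (wei + 0.01)
    return tf.reduce_sum(xent) / tf.reduce_sum(wei)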
Example #13
def run():
    env = RoboEnv(client)
    nb_actions = env.action_space.n

    for i in range(50):
        print('simulation start: %d' % i)
        old_session = KTF.get_session()

        with tf.Graph().as_default():
            session = tf.Session('')
            KTF.set_session(session)
            KTF.set_learning_phase(1)

            if not os.path.isfile(model_save_path):
                model = Sequential()
                model.add(
                    Flatten(input_shape=(1, ) + env.observation_space.shape))
                model.add(Dense(16))
                model.add(Activation('relu'))
                model.add(Dense(16))
                model.add(Activation('relu'))
                model.add(Dense(16))
                model.add(Activation('relu'))
                model.add(Dense(nb_actions))
                model.add(Activation('linear'))
            else:
                with open(model_save_path, 'r') as f:
                    model = model_from_json(f.read())

            if os.path.isfile(weight_save_path):
                model.load_weights(weight_save_path)
            print(model.summary())

            # memory for experience replay
            memory = SequentialMemory(limit=50000, window_length=1)
            # the behaviour policy is a standard epsilon-greedy; BoltzmannQPolicy, which picks actions with probabilities based on each action's Q value, can be used instead
            policy = EpsGreedyQPolicy(eps=0.1)
            dqn = DQNAgent(model=model,
                           nb_actions=nb_actions,
                           memory=memory,
                           nb_steps_warmup=100,
                           target_model_update=1e-2,
                           policy=policy)
            dqn.compile(Adam(lr=1e-3), metrics=['mae'])

            tb = TensorBoard(log_dir='data/tensorboard',
                             histogram_freq=0,
                             write_graph=True)

            history = dqn.fit(env,
                              nb_steps=50000,
                              visualize=False,
                              callbacks=[tb],
                              verbose=2,
                              nb_max_episode_steps=5000)

        json_string = model.to_json()
        with open(model_save_path, 'w') as f:
            f.write(json_string)
        model.save_weights(weight_save_path)

        with open(
                'data/history/history%s.pickle' %
                time.strftime('%Y%m%d-%H%M%S'), 'wb') as f:
            pickle.dump(history.history, f)

        KTF.set_session(old_session)
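Not shown in the example: keras-rl agents can be evaluated after training with test(). A minimal usage sketch, reusing the env, dqn, and weight_save_path objects from above:

# Evaluation sketch (not in the original): reload the saved weights and run a
# few greedy episodes with the trained agent.
dqn.load_weights(weight_save_path)
dqn.test(env, nb_episodes=5, visualize=False, nb_max_episode_steps=5000)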
Example #14
import tensorflow as tf
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Reshape
from keras.models import Sequential, model_from_json
from keras.utils import np_utils

M = 10

# data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('f') / 255.0  # (60000, 28, 28)
x_test = x_test.astype('f') / 255.0
y_train = np_utils.to_categorical(y_train, M)  # int -> one-of-vector
y_test = np_utils.to_categorical(y_test, M)

with tf.Graph().as_default():
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    try:
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model = model_from_json(loaded_model_json)
        loaded_model.load_weights("model.h5")
        print("Loaded model from disk")
    except:
        # model construction
        model = Sequential()
        model.add(Reshape((28, 28, 1),
                          input_shape=(28, 28)))  # tensorflow-order!!
        model.add(Conv2D(8, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(16, (5, 5), strides=(2, 2), activation='relu'))
Example #15
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # class weights
        classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
                'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
                'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe','rightShoe']
        if args.class_weights:
            """
            class_weights = {0:1, 1:40, 2:1, 3:114, 4:151, 5:3, 6:53, 7:7, 8:165, 9:7, 10:106,
                    11:249, 12:150, 13:1, 14:1, 15:1, 16:1, 17:1, 18:114, 19:118}
            """
            class_weights = [1, 40, 1, 114, 151, 3, 53, 7, 165, 7, 106, 249, 150, 1, 1, 1, 1, 1, 114, 118]

        # set callbacks
        fpath = "./pretrained_class_weights/LIP_PSPNet50_class_weights{epoch:02d}.hdf5"
        cp_cb = ModelCheckpoint(filepath = fpath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=2)
        es_cb = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_class_weights", write_images=True)

        # set generator
        train_gen = data_gen_small(
                trainimg_dir,
                trainmsk_dir,
                train_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)
        val_gen = data_gen_small(
                valimg_dir,
                valmsk_dir,
                val_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)

        # set model
        pspnet = PSPNet50(
                input_shape=args.input_shape,
                n_labels=args.n_labels,
                output_mode=args.output_mode,
                upsample_type=args.upsample_type)
        print(pspnet.summary())

        # compile model
        pspnet.compile(
                loss=args.loss,
                optimizer=args.optimizer,
                metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(
                generator=train_gen,
                steps_per_epoch=args.epoch_steps,
                epochs=args.n_epochs,
                validation_data=val_gen,
                validation_steps=args.val_steps,
                class_weight=class_weights,
                callbacks=[cp_cb, es_cb, tb_cb])

    # save model
    with open("./pretrained_class_weights/LIP_PSPNet50.json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))
    print("save json model done...")
Example #16
def main(fname_lift_train,
         fname_shape_train,
         fname_lift_test,
         fname_shape_test,
         case_number,
         case_type=3,
         env="Lab"):
    r_rate = [1, 2, 4, 8]
    for rr in r_rate:
        if rr == 1:
            s_odd = 0  # read all of the data
        elif fname_shape_train.find("fourier") != -1:
            s_odd = 3  # read from the front (for fourier data)
        else:
            s_odd = 4  # read at equal intervals over the whole set (for equidistant/dense data)

        old_session = KTF.get_session()

        with tf.Graph().as_default():
            source = "Incompressible_Invicid\\training_data\\"
            if env == "Lab":
                source = "D:\\Toyota\\Data\\" + source
                case_num = get_case_number(source, env, case_number)
                log_name = "learned\\" + case_num + "_tb_log.hdf5"
                json_name = "learned\\" + case_num + "_mlp_model_.json"
                weight_name = "learned\\" + case_num + "_mlp_weight.h5"
            elif env == "Colab":
                source = "/content/drive/Colab Notebooks/" + source.replace(
                    "\\", "/")
                case_num = get_case_number(source, env, case_number)
                log_name = "learned/" + case_num + "_log.hdf5"
                json_name = "learned/" + case_num + "_mlp_model_.json"
                weight_name = "learned/" + case_num + "_mlp_weight.h5"

            session = tf.Session('')
            KTF.set_session(session)
            KTF.set_learning_phase(1)

            model = Sequential()
            if case_type == 3:
                X_train, y_train = read_csv_type3(source,
                                                  fname_lift_train,
                                                  fname_shape_train,
                                                  shape_odd=s_odd,
                                                  read_rate=rr)
                x_test, y_test = read_csv_type3(source,
                                                fname_lift_test,
                                                fname_shape_test,
                                                shape_odd=s_odd,
                                                read_rate=rr)

            input_vector_dim = X_train.shape[1]
            with tf.name_scope("inference") as scope:
                model.add(Dense(units=2, input_dim=input_vector_dim))
                model.add(LeakyReLU())
                model.add(Dense(units=16))
                model.add(LeakyReLU())
                """
                model.add(Dense(units=128))
                model.add(LeakyReLU())
                model.add(Dense(units=192))
                model.add(LeakyReLU())
                model.add(Dense(units=2048))
                model.add(LeakyReLU())
                model.add(Dense(units=2048))
                model.add(LeakyReLU())
                """
                """
                model.add(Dense(units=512))
                model.add(LeakyReLU())
    
                for i in range(5):
                    model.add(Dense(units = 512))
                    model.add(LeakyReLU())
                    # model.add(Dropout(0.5))
                # model.add(Dense(units=half, activation='relu'))
                # model.add(Dropout(0.5))
                """
                model.add(Dense(units=1))

            model.summary()

            save_my_log(source, case_number, fname_lift_train,
                        fname_shape_train, model.summary())
            # es_cb = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
            tb_cb = TensorBoard(log_dir=source + log_name,
                                histogram_freq=0,
                                write_grads=True)

            model.compile(loss="mean_squared_error", optimizer='Adam')

            batch_size = 500
            train_steps, train_batches = batch_iter(X_train, y_train,
                                                    batch_size)
            valid_steps, valid_batches = batch_iter(x_test, y_test, batch_size)
            """
            model.fit(x=X_train, y=y_train,
                      batch_size=600, nb_epoch=1000,
                      validation_split=0.05, callbacks=[tb_cb])
            """
            model.fit_generator(train_batches,
                                train_steps,
                                epochs=1000,
                                validation_data=valid_batches,
                                validation_steps=valid_steps,
                                callbacks=[tb_cb])
            # X_train: [number, angle, shape001, shape002, ..., shapeMAX]
            # y_train: [number, lift]
            # pick an airfoil from around the middle of the data and plot the -40 to 38 degree range
            tekito = 1306 * 40  # NACA2613 or NACA2615
            plt.figure()
            plt.plot(X_train[tekito:tekito + 40, 0],
                     y_train[tekito:tekito + 40])
            plt.plot(X_train[tekito:tekito + 40, 0],
                     model.predict(X_train)[tekito:tekito + 40])
            plt.savefig(source + case_num + "_train.png")

            y_predict = model.predict(x_test)
            tekito = (99 + 13) * 40  # 22012
            plt.figure()
            plt.plot(x_test[tekito:tekito + 40, 0], y_test[tekito:tekito + 40])
            plt.plot(x_test[tekito:tekito + 40, 0],
                     y_predict[tekito:tekito + 40])
            plt.savefig(source + case_num + "_test.png")

        json_string = model.to_json()
        open(source + json_name, 'w').write(json_string)
        model.save_weights(source + weight_name)
        KTF.set_session(old_session)
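The batch_iter() helper used above returns a step count and a batch generator. A plausible sketch (an assumption, not the original) that shuffles the data each epoch:

# Hypothetical batch_iter(): returns the number of steps per epoch and an
# endless generator of shuffled (x, y) batches for fit_generator.
import numpy as np

def batch_iter(x, y, batch_size):
    steps_per_epoch = int(np.ceil(len(x) / float(batch_size)))

    def generator():
        while True:
            indices = np.random.permutation(len(x))
            for step in range(steps_per_epoch):
                batch = indices[step * batch_size:(step + 1) * batch_size]
                yield x[batch], y[batch]

    return steps_per_epoch, generator()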
Example #17
    def cnn_train(self, X_train, Y_train, X_test, Y_test):
        conv1 = self.conv1
        conv2 = self.conv2
        conv3 = self.conv3
        dense1 = self.dense1
        dense2 = self.dense2
        # build the model used for the neural network
        old_session = KTF.get_session()
        # old_session = tf.compat.v1.keras.backend.get_session()
        with tf.Graph().as_default():
            session = tf.Session('')
            KTF.set_session(session)
            KTF.set_learning_phase(1)

            model = Sequential()

            model.add(
                Conv2D(conv1,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=(X_train.shape[1:])))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Conv2D(conv2, (3, 3), activation='relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Conv2D(conv3, (3, 3), activation='relu'))
            # model.add(Dropout(0.25))
            # model.add(Conv2D(128, (3, 3), padding='same',activation='relu'))
            # model.add(MaxPooling2D(pool_size=(2, 2)))
            # model.add(Dropout(0.25))
            model.add(Flatten())
            model.add(Dense(dense1, activation='relu'))
            # model.add(Dropout(0.5))
            model.add(Dense(self.classnum, activation='softmax'))
            model.summary()
            # use Adam as the optimizer
            adam = keras.optimizers.Adam(lr=self.learning_rate)

            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])
            # model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
            es_cb = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0,
                                                  patience=100,
                                                  verbose=0,
                                                  mode='auto')
            tb_cb = keras.callbacks.TensorBoard(log_dir=self.f_log,
                                                histogram_freq=1)
            # cp_cb = keras.callbacks.ModelCheckpoint(filepath = os.path.join(f_model,'cnn_model{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-vloss{val_loss:.2f}-vacc{val_acc:.2f}.hdf5'), monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
            # cbks = [es_cb, tb_cb, cp_cb]
            cbks = [es_cb, tb_cb]
            # cbks = [tb_cb]
            history = model.fit(X_train,
                                Y_train,
                                batch_size=self.nb_batch,
                                epochs=self.nb_epoch,
                                validation_data=(X_test, Y_test),
                                callbacks=cbks,
                                verbose=1)
            score = model.evaluate(X_test, Y_test, verbose=0)
            print('Test score:', score[0])
            print('Test accuracy:', score[1])
            print('save the architecture of a model')
            json_string = model.to_json()
            open(os.path.join(self.f_model, 'cnn_model.json'),
                 'w').write(json_string)
            yaml_string = model.to_yaml()
            open(os.path.join(self.f_model, 'cnn_model.yaml'),
                 'w').write(yaml_string)
            print('save weights')
            model.save_weights(os.path.join(self.f_model, 'cnn_weights.hdf5'))
        KTF.set_session(old_session)
        return
Example #18
    # https://keras.io/getting-started/faq/#how-can-i-obtain-the-output-of-an-intermediate-layer

    if SAVE_HIDDEN:
        layers_of_interest = [
            'conv1d_1', 'conv1d_2', 'reshape_2', 'lstm_1', 'dense_1', 'dense_2'
        ]
        np.random.seed(0)
        rand_indices = np.random.randint(0, x_test.shape[0], 250)
        print('Saving hidden layers: ', layers_of_interest)
        tfs.get_keras_layers(
            model,
            layers_of_interest,
            x_test[rand_indices],
            y_test[rand_indices],
            output_dir=tfs.prep_dir(
                'I:/_ecg_data_backup/classification/hidden_layers'),
            fname='rat_hidden_all_' + file_name + '.mat')

    print('Elapsed Time (ms): ', tfs.current_time_ms() - start_time_ms)
    print('Elapsed Time (min): ',
          (tfs.current_time_ms() - start_time_ms) / 60000)

    data_dict = {'x_val': x_test, 'y_val': y_test, 'y_prob': yy_probabilities}
    savemat(tfs.prep_dir(output_folder) + file_name + '.mat', mdict=data_dict)
tf_backend.set_learning_phase(0)
tfs.export_model_keras(keras_model_name,
                       tfs.prep_dir("graph_rat"),
                       model_name=description)

# Results:
Example #19
 def __enter__(self):
     self.learning_phase_placeholder = K.learning_phase()
     K.set_learning_phase(self.value)
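Only __enter__ is shown above. A plausible completion of the context manager (the class name and the __exit__ behaviour are assumptions) that restores the previous learning phase on exit:

# Hypothetical completion of the context manager above; not the original class.
from keras import backend as K

class LearningPhaseScope(object):
    """Temporarily force the Keras learning phase inside a `with` block."""

    def __init__(self, value):
        self.value = value  # 0 = inference, 1 = training

    def __enter__(self):
        self.learning_phase_placeholder = K.learning_phase()
        K.set_learning_phase(self.value)

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the previous phase only if it was a concrete 0/1 value
        if isinstance(self.learning_phase_placeholder, int):
            K.set_learning_phase(self.learning_phase_placeholder)
        return False  # do not suppress exceptions

# Usage:
# with LearningPhaseScope(0):
#     predictions = model.predict(x)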
Example #20
def main(args):
    aggregator = Aggregator(_base_dir = args.base_dir,
                            _img_dir = args.img_dir,
                            _label_dir = args.label_dir,
                            _inf_dir = args.inf_dir,
                            _dag_dir = args.dag_dir,
                            _poses_dir = args.poses_dir)
    print('Num of ground truth labeled images %d\n\n' % len(aggregator.agg_list))

    for i in range(0, 100):
        print('\nDAgger Iteration: %d\n' % aggregator.dag_it_num)
        # creates new directories each iteration
        aggregator.on_new_iter()
        # returns the training and validation lists, plus the index in agg_list at which evaluation starts
        train, val, idx_eval = aggregator.get_training_data()
        # set directory to save predictions of inference
        inf_dir = aggregator.dag_dir + '%02d/inf/' % aggregator.dag_it_num
        # initiates the evaluator
        evaluator = Evaluator(aggregator.base_dir, inf_dir, aggregator.label_dir,
                              _agg_list = aggregator.agg_list)
        # estimate the batch size each process should evaluate later, so that a given number of processes is not exceeded
        evaluator.estimate_batch_size(len(aggregator.agg_list[idx_eval:]))

        print('Evaluating %d images in %d threads' % (evaluator.batch_size, evaluator.num_max_threads))

        if aggregator.dag_done or evaluator.stop_dagger:
            aggregator.save_list()

            print('DAgger stopped!')
            break

        aggregator.save_list(train, 'train')
        aggregator.save_list(val, 'val')

        # Training and Prediction

        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        with tf.Graph().as_default():
            session = tf.Session('')
            KTF.set_session(session)
            KTF.set_learning_phase(1)
            # initializes a trainer with already separated training and validation list
            # an inference is done for all images in the agg_list after the idx_eval
            trainer = Trainer(_train_list = train,
                              _val_list = val,
                              _inf_list = aggregator.agg_list[idx_eval:],
                              _base_dir = aggregator.base_dir,
                              _img_dir = aggregator.img_dir,
                              _label_dir = aggregator.label_dir,
                              _inf_dir = aggregator.inf_dir,
                              _dag_dir = aggregator.dag_dir,
                              _log_dir = 'log/')
            # setting of some hyper parameters
            trainer.batch_size = 8
            # increase epoch steps so the net has a chance to see every image of the training set in an epoch;
            # otherwise the validation loss visibly stops decreasing and training is stopped early
            trainer.epoch_steps = len(train) // trainer.batch_size
            trainer.val_steps = len(val) // trainer.batch_size
            trainer.n_epochs = 25
            trainer.dag_it = aggregator.dag_it_num
            trainer.update_callback()
            # trains model for defined number of epochs with the actual dataset
            trainer.train()
            print('\nTraining done!\nStarting Prediction\n')
            # saves inference results for images the net has not yet seen
            trainer.predict()
            session.close()

        print('\nInference done!\n')
        print('Evaluating %d images' % len(aggregator.agg_list[idx_eval:]))
        # Training and prediction done

        # Evaluation

        aggregator.agg_list = evaluator.process_prediction(agg_chunk = aggregator.agg_list,
                                                           idx_eval = idx_eval)
        print('Evaluation done. Saving evaluated data.')
        aggregator.save_list(aggregator.agg_list[idx_eval:], 'eval')
        # Evaluation done and saved for next iteration

        # save the full aggregation list, with information from all iterations so far, in this iteration's folder
        aggregator.save_list()
        # delete all images of inference step to save space on the drive
        aggregator.delete_inf()
        aggregator.prepare_next_it()
Example #21
    def cnn_train_noneval(self, X_train, Y_train):
        conv1 = self.conv1
        conv2 = self.conv2
        conv3 = self.conv3
        dense1 = self.dense1
        dense2 = self.dense2
        # build the model used for the neural network
        old_session = KTF.get_session()
        #old_session = tf.compat.v1.keras.backend.get_session()
        with tf.Graph().as_default():
            session = tf.Session('')
            KTF.set_session(session)
            KTF.set_learning_phase(1)

            model = Sequential()

            model.add(
                Conv2D(conv1,
                       kernel_size=(3, 3),
                       activation='relu',
                       input_shape=(X_train.shape[1:])))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Conv2D(conv2, (3, 3), activation='relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(Conv2D(conv3, (3, 3), activation='relu'))
            # model.add(Dropout(0.25))
            # model.add(Conv2D(128, (3, 3), padding='same',activation='relu'))
            # model.add(MaxPooling2D(pool_size=(2, 2)))
            # model.add(Dropout(0.25))
            model.add(Flatten())
            model.add(Dense(dense1, activation='relu'))
            # model.add(Dropout(0.5))
            model.add(Dense(self.classnum, activation='softmax'))
            model.summary()
            # use Adam as the optimizer
            adam = keras.optimizers.Adam(lr=self.learning_rate)

            model.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])
            # model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

            history = model.fit(X_train,
                                Y_train,
                                batch_size=self.nb_batch,
                                epochs=self.nb_epoch,
                                validation_data=None,
                                callbacks=None,
                                verbose=1)

            print('save the architecture of a model')
            json_string = model.to_json()
            open(os.path.join(self.f_model, 'cnn_model.json'),
                 'w').write(json_string)
            yaml_string = model.to_yaml()
            open(os.path.join(self.f_model, 'cnn_model.yaml'),
                 'w').write(yaml_string)
            print('save weights')
            model.save_weights(os.path.join(self.f_model, 'cnn_weights.hdf5'))
        KTF.set_session(old_session)
        return
Example #22
def main(args):
    # device number
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num

    print(os.environ["CUDA_VISIBLE_DEVICES"])
    # set the necessary directories
    train_dir = 'resources/train'
    test_dir = 'resources/test'

    train_list = []
    train_list_file = "resources/train_list.txt"
    val_list_file = "resources/val_list.txt"

    with open(train_list_file, "r") as f:
        for l in f:
            train_list.append(l.replace("\n", ""))
    val_list = []
    with open(val_list_file, "r") as f:
        for l in f:
            val_list.append(l.replace("\n", ""))

    with tf.Graph().as_default():
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        session = tf.Session(config=config)
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        cp_cb = ModelCheckpoint(filepath='resources/checkpoints/checkpoint',
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir='resources/logs/', write_images=True)
        csv_logger = CSVLogger('resources/logs/training.log')

        # set generator
        train_gen = data_gen_small('resources/train/', 'resources/train/',
                                   train_list, args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small('resources/val/', 'resources/val/', val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        model = segunet(args.input_shape, args.n_labels, args.kernel,
                        args.pool_size, args.output_mode)
        print(model.summary())

        # compile model
        model.compile(loss=args.loss,
                      optimizer=args.optimizer,
                      metrics=["accuracy"])

        # fit with generator
        model.fit_generator(generator=train_gen,
                            steps_per_epoch=args.epoch_steps,
                            epochs=args.n_epochs,
                            validation_data=val_gen,
                            validation_steps=args.val_steps,
                            callbacks=[cp_cb, es_cb, tb_cb, csv_logger])

        model.save_weights("resources/weights/weights_01.hdf5")
Example #23
def dnn_classification(train_x, train_y, test_x, test_y, class_number, base_dir, from_frequency, to_frequency, frequency_list):
    # conv1 = 30
    nb_epoch = 10000
    nb_batch = 32
    learning_rate = 1e-2
    try:  # when a convolution layer is used
        conv1

        train_x.resize(train_x.shape[0], train_x.shape[1], 1)
        test_x.resize(test_x.shape[0], test_x.shape[1], 1)
    except:
        pass

    dense1 = 60
    dense2 = 30
    dense3 = 14
    dense4 = class_number
    regularizers_l2_1 = 0
    regularizers_l2_2 = 0
    regularizers_l2_3 = 0

    try:
        model_structure = 'conv{0}relu_{1}relul2{2}_{3}relul2{4}_{5}relul2{6}_{7}softmax'.format(conv1, dense1,
                                                                                                 regularizers_l2_1,
                                                                                                 dense2,
                                                                                                 regularizers_l2_2,
                                                                                                 dense3,
                                                                                                 regularizers_l2_3,
                                                                                                 dense4)
    except:
        model_structure = '{0}relul2{1}_{2}relul2{3}_{4}relul2{5}_{6}softmax'.format(dense1, regularizers_l2_1, dense2,
                                                                                     regularizers_l2_2, dense3,
                                                                                     regularizers_l2_3, dense4)
    f_log = base_dir + '/logs/fit' + 'freq' + str(
        from_frequency) + 'to' + str(to_frequency) + 'num' + str(
        len(frequency_list)) + '/' + model_structure + '_lr' + str(learning_rate) + '/Adam_epoch' + str(
        nb_epoch) + '_batch' + str(nb_batch)
    # print(f_log)
    f_model = base_dir + '/model'  + 'freq' + str(
        from_frequency) + 'to' + str(to_frequency) + 'num' + str(
        len(frequency_list)) + '/' + model_structure + '_lr' + str(learning_rate) + '/Adam_epoch' + str(
        nb_epoch) + '_batch' + str(nb_batch)
    os.makedirs(f_model, exist_ok=True)
    # build the model used for the neural network
    old_session = KTF.get_session()
    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)
        model = keras.models.Sequential()
        try:
            model.add(Conv1D(conv1, 4, padding='same', input_shape=(train_x.shape[1:]), activation='relu'))
            model.add(Flatten())
            model.add(Dense(dense1, activation='relu', kernel_regularizer=regularizers.l2(regularizers_l2_1)))
        except:
            model.add(Dense(dense1, activation='relu', kernel_regularizer=regularizers.l2(regularizers_l2_1),
                            input_shape=(train_x.shape[1:])))

        # model.add(Dropout(0.25))
        model.add(Dense(dense2, activation='relu', kernel_regularizer=regularizers.l2(regularizers_l2_2)))
        # model.add(Dropout(0.25))
        model.add(Dense(dense3, activation='relu', kernel_regularizer=regularizers.l2(regularizers_l2_3)))
        model.add(Dense(dense4, activation='softmax'))

        model.summary()
        # optimizer には adam を指定
        adam = keras.optimizers.Adam(lr=learning_rate)

        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

        train_y = np.array(train_y)
        test_y = np.array(test_y)
        # print(test_y)
        # print(test_y.shape)
        # print(type(test_y))
        es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=1000, verbose=0, mode='auto')
        tb_cb = keras.callbacks.TensorBoard(log_dir=f_log, histogram_freq=1)
        # cp_cb = keras.callbacks.ModelCheckpoint(filepath = os.path.join(f_model,'tag_model{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-vloss{val_loss:.2f}-vacc{val_acc:.2f}.hdf5'), monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
        # cbks = [es_cb, tb_cb, cp_cb]
        cbks = [es_cb, tb_cb]
        history = model.fit(train_x, train_y, batch_size=nb_batch, epochs=nb_epoch,
                            validation_data=(test_x, test_y), callbacks=cbks, verbose=1)
        score = model.evaluate(test_x, test_y, verbose=0)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        predict = model.predict(test_x)
        # print('predict:{}'.format(predict))
        print('save the architecture of a model')
        json_string = model.to_json()
        open(os.path.join(f_model, 'tag_model.json'), 'w').write(json_string)
        yaml_string = model.to_yaml()
        open(os.path.join(f_model, 'tag_model.yaml'), 'w').write(yaml_string)
        print('save weights')
        model.save_weights(os.path.join(f_model, 'tag_weights.hdf5'))
    KTF.set_session(old_session)
    best_pred = []
    probability = []
    category = np.arange(1, class_number+1)
    for (i, pre) in enumerate(predict):
        y = pre.argmax()  # index of the highest predicted probability in pre; the test labels are one-hot vectors
        best_pred.append(category[y])
        probability.append(pre[y])
    return best_pred, probability
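A hypothetical call to dnn_classification above, purely for illustration; the array shapes, class count and base_dir are assumptions and not taken from the original code.

import numpy as np
from keras.utils import to_categorical

# Assumed toy data: 200/50 samples with 64 features and 3 classes.
train_x = np.random.rand(200, 64).astype('float32')
test_x = np.random.rand(50, 64).astype('float32')
train_y = to_categorical(np.random.randint(0, 3, 200), num_classes=3)
test_y = to_categorical(np.random.randint(0, 3, 50), num_classes=3)

best_pred, probability = dnn_classification(
    train_x, train_y, test_x, test_y,
    class_number=3,
    base_dir='./experiments',            # assumed output directory
    from_frequency=1, to_frequency=40,   # only used to build log/model paths
    frequency_list=list(range(1, 41)))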
Example #24
0
        MultiprocessIterator(
            valid,
            batch_size=8,
            #repeat=False,
            shuffle=False,
            n_processes=12,
            n_prefetch=120,
            shared_mem=1000 * 1000 *
            5))  # type: Iterator[Tuple[np.ndarray, np.ndarray]]

    old_session = tensorflow_backend.get_session()

    with K.tf.Graph().as_default():
        session = K.tf.Session("")
        tensorflow_backend.set_session(session)
        tensorflow_backend.set_learning_phase(1)

        loss = ""  # type: Union[Callable, str]
        metrics = []  # type: List[Union[Callable, str]]
        input_shape = (resize_shape[0], resize_shape[1], 3)
        if args.dice_coef:
            output_ch = 1
            loss = dice_coef_loss
            metrics = [dice_coef]
            filename = "_weights.epoch{epoch:04d}-val_loss{val_loss:.2f}-val_dice_coef{val_dice_coef:.2f}.hdf5"
        else:
            output_ch = 2
            loss = "categorical_crossentropy"
            metrics = ['accuracy']
            filename = "_weights.epoch{epoch:04d}-val_loss{val_loss:.2f}-val_acc{val_acc:.2f}.hdf5"
Example #25
0
def main(args):
    # device number
    if args.gpu_num:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        cp_cb = ModelCheckpoint(filepath=args.log_dir,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=2)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir=args.log_dir, write_images=True)

        # set generator
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        print(pspnet.summary())

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.epoch_steps,
                             epochs=args.n_epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, es_cb, tb_cb])
Example #26
0
def main(args):
    # set the necessary list
    # train_list = pd.read_csv(args.train_list,header=None)
    # val_list = pd.read_csv(args.val_list,header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    # old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        fpath = './pretrained_mask/' + args.name + '{epoch:02d}.hdf5'
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                mode='auto',
                                period=1)
        tb_cb = TensorBoard(log_dir="./pretrained_mask",
                            write_graph=True,
                            write_images=True)

        seq = iaa.Sequential(
            [
                iaa.Crop(
                    px=(0, 16)
                ),  # crop images from each side by 0 to 16px (randomly chosen)
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                sometimes(
                    iaa.Affine(
                        scale={
                            "x": (0.8, 1.2),
                            "y": (0.8, 1.2)
                        },
                        # scale images to 80-120% of their size, individually per axis
                        translate_percent={
                            "x": (-0.2, 0.2),
                            "y": (-0.2, 0.2)
                        },  # translate by -20 to +20 percent (per axis)
                        rotate=(-10, 10),  # rotate by -10 to +10 degrees
                    )),
            ],
            random_order=True)
        if args.dataset == 'coco':
            train_gen = Datahandler_COCO(trainimg_dir,
                                         trainmsk_dir).make_batches(
                                             batchsize=args.batch_size,
                                             inputshape=args.input_shape,
                                             augmentation=seq)
            val_gen = Datahandler_COCO(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)
        elif args.dataset == 'pascal_khamba':
            train_gen = Pascal_Generator(trainimg_dir,
                                         trainmsk_dir).make_batches(
                                             batchsize=args.batch_size,
                                             inputshape=args.input_shape,
                                             augmentation=seq)
            val_gen = Pascal_Generator(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)

        else:
            train_gen = Default_Generator(trainimg_dir,
                                          trainmsk_dir).make_batches(
                                              batchsize=args.batch_size,
                                              inputshape=args.input_shape,
                                              augmentation=seq)
            val_gen = Default_Generator(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        print(pspnet.summary())
        if args.load is not None:
            print("loadinf weights")
            pspnet.load_weights(args.load)

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.steps,
                             epochs=args.epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, tb_cb],
                             verbose=True)

    # save model
    with open("./pretrained_mask/" + args.name + ".json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))

    print("save json model done...")
Example #27
0
def run_cnn(X_train,
            X_val,
            X_test,
            network_func,
            n_classes=30,
            img_shape=(128, 128, 3),
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
            callbacks=None,
            batch_size=32,
            steps_per_epoch=64,
            num_epochs=300,
            validation_steps=50,
            path_save_weights=None,
            path_plot_model=None,
            dropout=False):

    train_iter = batch_generator.Batch_generator(X_train,
                                                 image_shape=img_shape,
                                                 num_classes=n_classes,
                                                 batch_size=batch_size,
                                                 segment=False)
    val_iter = batch_generator.Batch_generator(X_val,
                                               image_shape=img_shape,
                                               num_classes=n_classes,
                                               batch_size=batch_size,
                                               segment=False)

    old_session = KTF.get_session()

    # with tf.Graph().as_default():
    session = tf.Session()
    KTF.set_session(session)

    # if using dropout (the network behaves differently between the training and testing phases)
    if dropout:
        KTF.set_learning_phase(1)

    inputs = layers.Input(img_shape)
    output_layer = network_func(inputs, n_classes)
    model = models.Model(inputs, output_layer)
    model.summary()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    history = model.fit_generator(train_iter,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  validation_data=val_iter,
                                  validation_steps=validation_steps,
                                  callbacks=callbacks,
                                  workers=2)

    # save model
    # model.save('./test_log/models/fcn_model.hdf5')
    if path_save_weights is not None:
        try:
            model.save_weights(path_save_weights)
        except:
            print("Could not save weights data.")

    if path_plot_model is not None:
        try:
            plot_model(model, to_file=path_plot_model)
        except:
            print("Could not plot model properly.")

    # test the prediction
    print("\n")
    print("Testing model...")
    score = model_helper.predict_from_path(model, X_test, img_shape, n_classes)
    print("\n")
    print("=====" * 13)
    print("\n")
    print("Test score    : {:.6f}".format(score[0]))
    print("Test accuracy : {:.6f}\n".format(score[1]))

    KTF.set_session(old_session)
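A hypothetical invocation of run_cnn above. The tiny network builder and the path lists are assumptions used only to show the expected call shape.

from keras import layers

def tiny_cnn(inputs, n_classes):
    # minimal stand-in for a real network_func: conv -> global pooling -> softmax
    x = layers.Conv2D(16, 3, activation='relu', padding='same')(inputs)
    x = layers.GlobalAveragePooling2D()(x)
    return layers.Dense(n_classes, activation='softmax')(x)

run_cnn(X_train=train_paths,     # assumed lists understood by Batch_generator
        X_val=val_paths,
        X_test=test_paths,
        network_func=tiny_cnn,
        n_classes=30,
        num_epochs=5,
        path_save_weights='./weights/tiny_cnn.hdf5')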
Example #28
0
def main():
    base_dir = '/media/localadmin/Test/11Nils/kitti/dataset/sequences/Data/'
    label_dir = 'labels/'
    train_img_dir = 'images/'
    eval_base_dir = '/media/localadmin/Test/11Nils/kitti/dataset/sequences/08/'
    eval_lbl_dir = 'labels/'
    eval_img_dir = 'image_2/'
    inf_dir = 'pooling_test/'
    test_no_indices = True
    if test_no_indices:
        inf_dir_tested = 'MaxPooling2D/'
        log = 'log/'
        gpu_num = '0, 1, 2, 3'
    else:
        inf_dir_tested = 'MaxPooling2DWithIndices/'
        log = 'log_indices/'
        gpu_num = '2, 3'
    segnet = True
    if segnet:
        inf_dir_tested = 'SegNet/'
        log = 'log_seg/'
        gpu_num = '2, 3'

    label_list = sorted(os.listdir(base_dir + label_dir))

    shuffle(label_list)

    train = label_list[:int(len(label_list) * 0.8)]
    val = label_list[int(len(label_list) * 0.8):]
    eval_list = os.listdir(eval_base_dir + eval_lbl_dir)  # avoid shadowing the built-in eval

    print(len(train), len(val), len(eval_list))

    with tf.Graph().as_default():
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_num
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)
        trainer = Trainer(_train_list=train,
                          _val_list=val,
                          _inf_list=eval_list,
                          _gpu_num=gpu_num,
                          _no_inidices=test_no_indices,
                          _segnet=segnet)
        trainer.base_dir = base_dir
        trainer.label_dir = label_dir
        trainer.img_dir = train_img_dir
        trainer.log_dir = eval_base_dir + inf_dir + log
        trainer.inf_dir = inf_dir + inf_dir_tested
        trainer.batch_size = 16
        trainer.epoch_steps = 750
        trainer.val_steps = 200
        trainer.n_epochs = 30
        trainer.dag_it = 0
        trainer.update_callback()
        # train the model for the defined number of epochs on the current dataset
        print('Loading labels from %s' %
              (trainer.base_dir + trainer.label_dir))
        print('Loading imgs from %s' % (trainer.base_dir + trainer.img_dir))
        trainer.train()
        print('\nTraining done!\nStarting Prediction\n')
        # saves inferences for images that the network has not seen

        trainer.base_dir = eval_base_dir
        trainer.img_dir = eval_img_dir
        print('Loading labels from %s' %
              (trainer.base_dir + trainer.label_dir))
        print('Loading imgs from %s' % (trainer.base_dir + trainer.img_dir))

        trainer.predict()
        session.close()
Example #29
0
                ax.text(x - 1,
                        y - 1,
                        part_place_name,
                        color='black',
                        fontsize=15)
                place_name_list.append(part_place_name)
        return ax, place_name_list


# set the target image that we actually want to verify
img = cv2.imread("hrei-sign105.png")
input_shape = (50, 100, 1)

with tf.Graph().as_default():
    session = tf.Session('')
    KTF.set_session(session)
    KTF.set_learning_phase(1)

    def schedule(epoch, decay=0.9):
        return base_lr * decay**(epoch)

    base_lr = 3e-4
    optim = Adam(lr=base_lr)

    model = spp_model(input_shape, NUM_CLASSES, optim)
    model.load_weights('./param/learning_place_name.hdf5', by_name=True)

    result_label_list = predict_img(img, model, place_list, input_shape)

print(len(result_label_list))
Example #30
0
def run_segmentation(X_train,
                     y_train,
                     X_val,
                     y_val,
                     X_test,
                     y_test,
                     network_func,
                     n_classes=30,
                     img_shape=(128, 128, 3),
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'],
                     callbacks=None,
                     batch_size=32,
                     steps_per_epoch=500,
                     num_epochs=30,
                     validation_steps=65,
                     path_save_weights=None,
                     path_save_pred_images=None,
                     num_save_pred_image=None,
                     dropout=False):

    #     import tensorflow as tf
    #     from keras import backend as K

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.90

    train_iter = batch_generator.Batch_generator(X_train,
                                                 target=y_train,
                                                 image_shape=img_shape,
                                                 num_classes=n_classes,
                                                 batch_size=batch_size,
                                                 segment=True)
    val_iter = batch_generator.Batch_generator(X_val,
                                               target=y_val,
                                               image_shape=img_shape,
                                               num_classes=n_classes,
                                               batch_size=batch_size,
                                               segment=True)

    old_session = KTF.get_session()

    # with tf.Graph().as_default():
    session = tf.Session(config=config)
    KTF.set_session(session)

    # if using dropout (the network behaves differently between the training and testing phases)
    if dropout:
        KTF.set_learning_phase(1)

    inputs = layers.Input(img_shape)
    output_layer = network_func(inputs, n_classes)
    model = models.Model(inputs, output_layer)
    model.summary()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    model.fit_generator(train_iter,
                        steps_per_epoch=steps_per_epoch,
                        epochs=num_epochs,
                        verbose=1,
                        validation_data=val_iter,
                        validation_steps=validation_steps,
                        callbacks=callbacks,
                        workers=2)

    # save model
    # model.save('./test_log/models/fcn_model.hdf5')
    if path_save_weights is not None:
        try:
            model.save_weights(path_save_weights)
        except:
            print("Path to save weights data is not valid.")

    # predicting and calculate IoU
    print("\n")
    print("=====" * 15)
    print("\n")
    print("Testing model...")
    start = time()
    # extract data
    test = np.vstack([
        np.expand_dims(misc.imresize(misc.imread(t), img_shape), axis=0)
        for t in X_test
    ])

    pred = model_helper.prediction(model, test)  # predicted data
    target = pre.pixelwise_class_array(y_test)  # ground truths

    iou = model_helper.ave_iou_score(pred, target)
    end = time()
    print("\n")
    print("IoU score    : {:.6f}".format(iou))
    print("Calcuration time : {:.6f} sec.".format(end - start))

    # Save predicted image
    if path_save_pred_images is not None:
        print("\n")
        print("=====" * 15)
        print("\n")
        print("Saving predict image...")

        path_save_pred_images = os.path.join(path_save_pred_images,
                                             'predictions')

        # create directory
        pre.makedirs_if_none(path_save_pred_images)

        # reduce save data if need
        if num_save_pred_image is not None:
            pred = pred[:num_save_pred_image]
            X_test = X_test[:num_save_pred_image]

        # convert from class array to image(rgb) array
        pred = pre.pixelwise_array_to_img(pred)

        # Save data
        for img, file_path in zip(pred, X_test):
            misc.imsave(
                os.path.join(path_save_pred_images,
                             os.path.basename(file_path)), img)

        print("Done.")

    # close current session
    KTF.set_session(old_session)
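A hypothetical call to run_segmentation above; the pixel-wise FCN-style builder and the image/mask path lists are assumptions for illustration only.

from keras import layers

def tiny_fcn(inputs, n_classes):
    # minimal pixel-wise classifier: 3x3 conv -> 1x1 conv with per-pixel softmax
    x = layers.Conv2D(32, 3, activation='relu', padding='same')(inputs)
    return layers.Conv2D(n_classes, 1, activation='softmax', padding='same')(x)

run_segmentation(X_train=train_imgs, y_train=train_masks,   # assumed path lists
                 X_val=val_imgs, y_val=val_masks,
                 X_test=test_imgs, y_test=test_masks,
                 network_func=tiny_fcn,
                 n_classes=30,
                 num_epochs=3,
                 path_save_weights='./weights/tiny_fcn.hdf5',
                 path_save_pred_images='./predictions')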