Example #1
    def test_experiment_fit_gen(self, get_model, get_loss_metric,
                                get_custom_l, get_callback_fix):
        new_session()
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        model_name = model.__class__.__name__
        _, data_val_use = make_data(train_samples, test_samples)
        expe = Experiment(model)

        for val in [1, data_val_use]:
            gen, data, data_stream = make_gen(batch_size)
            # ``val`` is rebound below, so remember whether a second
            # generator pair was created and needs closing afterwards.
            use_gen_val = val == 1
            if use_gen_val:
                val, data_2, data_stream_2 = make_gen(batch_size)
            expe.fit_gen([gen], [val], nb_epoch=2,
                         model=model,
                         metrics=metrics,
                         custom_objects=cust_objects,
                         samples_per_epoch=64,
                         nb_val_samples=128,
                         verbose=2, overwrite=True,
                         callbacks=get_callback_fix)

            close_gens(gen, data, data_stream)
            if use_gen_val:
                close_gens(val, data_2, data_stream_2)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #2
    def test_build_predict_func(self, get_model):
        """Test the build of a model"""
        new_session()
        X_tr = np.ones((train_samples, input_dim))
        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_name = model.__class__.__name__

        pred_func = KTB.build_predict_func(model)

        tensors = [X_tr]
        if model_name != 'Model':
            tensors.append(1.)

        res = pred_func(tensors)

        assert len(res[0]) == len(X_tr)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #3
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5'):
    '''create the training model, for Tiny YOLOv3'''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
        num_anchors//2, num_classes+5)) for l in range(2)]

    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers)-2)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
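The returned model computes the YOLO loss inside its final Lambda layer, so training code typically compiles it with a pass-through loss. A hedged sketch of that step (the optimizer, learning rate, and 416x416 input shape are illustrative assumptions, not part of the example above):

from keras.optimizers import Adam

# `anchors` and `num_classes` must already be defined.
model = create_tiny_model((416, 416), anchors, num_classes)
# The Lambda layer's output already *is* the loss, so pass it straight through.
model.compile(optimizer=Adam(lr=1e-3),
              loss={'yolo_loss': lambda y_true, y_pred: y_pred})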
Example #4
    def run_one_eval(self, train_x, train_y, valid_x, valid_y, epochs, config):
        model, history = self.train_model_for_data(train_x, train_y, epochs, config, valid=0.1)
        loss = model.evaluate(valid_x, valid_y, verbose=0)
        print("Running an eval with config: %s had validation loss %f" % (str(config), loss))

        K.clear_session()
        return loss
Example #5
            def predictor(q):
                model = make_model(network, (None, None, channels))
                model.load_weights('trained_models/' + weights)

                while True:
                    f, image = q.get()
                    if image is None:
                        break

                    preds = []
                    for tta in [None, 'hflip', 'vflip', 'hflip+vflip']:
                        ttas = []
                        if tta:
                            ttas = tta.split("+")
                        img = do_tta(image, ttas)
                        pred = model.predict(np.expand_dims(img, axis=0), batch_size=1, verbose=0)[0]
                        pred = undo_tta(pred, ttas)
                        preds.append(pred)
                    mask = np.average(np.array(preds), axis=0)
                    all_masks_dir = "all_masks"
                    os.makedirs(all_masks_dir, exist_ok=True)
                    model_mask_dir = os.path.join(all_masks_dir, out_dir)
                    os.makedirs(model_mask_dir, exist_ok=True)
                    cv2.imwrite(os.path.join(model_mask_dir, f + ".png"), mask * 255)
                del model
                K.clear_session()
Example #6
    def test_experiment_fit(self, get_model, get_loss_metric,
                            get_custom_l, get_callback_fix):
        new_session()
        data, data_val = make_data(train_samples, test_samples)
        model, metrics, cust_objects = prepare_model(get_model(get_custom_l),
                                                     get_loss_metric,
                                                     get_custom_l)

        expe = Experiment(model)

        for mod in [None, model]:
            for data_val_loc in [None, data_val]:
                expe.fit([data], [data_val_loc], model=mod, nb_epoch=2,
                         batch_size=batch_size, metrics=metrics,
                         custom_objects=cust_objects, overwrite=True,
                         callbacks=get_callback_fix)

        expe.backend_name = 'another_backend'
        expe.load_model()
        expe.load_model(expe.mod_id, expe.data_id)

        assert expe.data_id is not None
        assert expe.mod_id is not None
        assert expe.params_dump is not None

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #7
    def _run(self):

        round_params(self)

        try:
            _hr_out, self.keras_model = self._model()
        except TypeError:
            print('The model needs to return in the format "return history, model"')
            raise

        self.epoch_entropy.append(epoch_entropy(_hr_out))
        _hr_out = run_round_results(self, _hr_out)

        self._val_score = get_score(self)

        write_log(self)
        self.result.append(_hr_out)
        save_result(self)
        time_estimator(self)

        if (self.round_counter + 1) % self.reduction_interval == 0:
            if self.reduction_method == 'spear':
                self = spear_reducer(self)

        K.clear_session()
        self.round_counter += 1
Example #8
def start_session_get_args_and_model(intra_ops, inter_ops, semantics_json, weights_hd5=None, tensor_type=None):
    K.clear_session()
    K.get_session().close()
    cfg = K.tf.ConfigProto(intra_op_parallelism_threads=intra_ops, inter_op_parallelism_threads=inter_ops)
    cfg.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=cfg))
    return args_and_model_from_semantics(semantics_json, weights_hd5, tensor_type)
Example #9
    def KerasEmit(original_framework, architecture_name, architecture_path, weight_path, image_path):
        # IR to code
        converted_file = original_framework + '_keras_' + architecture_name + "_converted"
        converted_file = converted_file.replace('.', '_')
        emitter = Keras2Emitter((architecture_path, weight_path))
        emitter.run(converted_file + '.py', None, 'test')
        del emitter

        # import converted model
        model_converted = __import__(converted_file).KitModel(weight_path)

        func = TestKit.preprocess_func[original_framework][architecture_name]
        img = func(image_path)
        input_data = np.expand_dims(img, 0)

        predict = model_converted.predict(input_data)
        converted_predict = np.squeeze(predict)

        del model_converted
        del sys.modules[converted_file]

        import keras.backend as K
        K.clear_session()

        os.remove(converted_file + '.py')
        return converted_predict
Example #10
    def __call__(cls, *args, **kwargs):
        obj = cls.__new__(cls)
        from .keras_model import KerasModel
        if issubclass(cls, KerasModel):
            import keras.backend as K
            if K.backend() != 'tensorflow':
                obj.__init__(*args, **kwargs)
                return obj

            K.clear_session()
            obj.graph = tf.Graph()
            with obj.graph.as_default():
                if hasattr(cls, '_config_session'):
                    obj.sess = cls._config_session()
                else:
                    obj.sess = tf.Session()
        else:
            obj.graph = tf.Graph()

        for meth in dir(obj):
            if meth == '__class__':
                continue
            attr = getattr(obj, meth)
            if callable(attr):
                if issubclass(cls, KerasModel):
                    wrapped_attr = _keras_wrap(attr, obj.graph, obj.sess)
                else:
                    wrapped_attr = _graph_wrap(attr, obj.graph)
                setattr(obj, meth, wrapped_attr)
        obj.__init__(*args, **kwargs)
        return obj
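The `_graph_wrap` and `_keras_wrap` helpers used above are not shown in this example; a plausible sketch (an assumption, not the project's actual code) is a closure that re-enters the object's graph and session before delegating:

import functools

def _graph_wrap(func, graph):
    # Run `func` with `graph` as the default TensorFlow graph.
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)
    return wrapped

def _keras_wrap(func, graph, sess):
    # Run `func` inside the object's graph with its Keras session active.
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        import keras.backend as K
        with graph.as_default():
            K.set_session(sess)
            return func(*args, **kwargs)
    return wrapped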
Example #11
def test_clone_sequential_model():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(4, input_shape=(4,)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(4))

    if K.backend() == 'tensorflow':
        # Everything should work in a new session.
        K.clear_session()

    # With placeholder creation
    new_model = keras.models.clone_model(model)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new tensor
    input_a = keras.Input(shape=(4,))
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new, non-Keras tensor
    input_a = keras.backend.variable(val_a)
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(None, val_out)
Example #12
def train(args):
    with open("conf/fea.yaml") as fin:
        cfg = yaml.load(fin)[args.p]
        begin, end = map(int, cfg["train"].split("-"))
    with TimeLog("load data"):
        data = read_data(cfg["data"], begin, end)

    if not args.m:
        args.m = args.p
    else:
        args.m = args.p + "_" + args.m
    print "model={m}, data={d}".format(d=cfg["data"], m=args.m)
    model_dir = "model/" + args.m + "/"
    makedirs(model_dir)

    if False:
        model = load_model(model_dir)
    else:
        model = make_model(model_dir, len(data.fea[0]))

    gamma = 0.6
    n_epochs = [10, 10]
    lr = 0.001
    batch_size = 1024

    def lr_scheduler(epoch):
        learning_rate = lr
        ep = epoch
        for n_epoch in n_epochs:
            if ep - n_epoch < 0:
                break
            learning_rate *= gamma
            ep -= n_epoch
        print 'lr: %f' % learning_rate
        return learning_rate

    scheduler = LearningRateScheduler(lr_scheduler)
    if args.opt == "adam":
        optimizer = Adam(lr=lr)
    else:
        optimizer = SGD(lr=lr)
    # model.compile(loss='binary_crossentropy', optimizer=optimizer)
    model.compile(loss='mse', optimizer=optimizer)

    filepath = model_dir + "/e{epoch:d}.hdf5"
    checkpoint = ModelCheckpoint(
        filepath, monitor='mse',
        verbose=1, save_best_only=False, mode='auto')
    callbacks_list = [scheduler, checkpoint]

    model.fit(data.fea, data.tgt, batch_size=batch_size,
              validation_split=0.1,
              nb_epoch=sum(n_epochs), callbacks=callbacks_list)
    print 'saving...'
    weightPath = model_dir + '/weight.hdf5'
    model.save_weights(weightPath, overwrite=True)

    print 'training finished'
    K.clear_session()
Example #13
def clear_session_after_test():
    """Test wrapper to clean up after TensorFlow and CNTK tests.

    This wrapper runs for all the tests in the keras test suite.
    """
    yield
    if K.backend() == 'tensorflow' or K.backend() == 'cntk':
        K.clear_session()
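The yield-based wrapper above reads like the body of a pytest fixture; a hedged sketch of how it would typically be registered (the autouse wiring is an assumption, not shown in the original):

import pytest
from keras import backend as K

@pytest.fixture(autouse=True)
def clear_session_after_test():
    """Clean up TensorFlow/CNTK state after every test in the suite."""
    yield
    if K.backend() in ('tensorflow', 'cntk'):
        K.clear_session()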
Example #14
    def __init__(self, path=DEFAULT_MODEL_PATH):
        logger.info('Loading model from: {}...'.format(path))
        clear_session()

        self.model = models.load_model(path)
        # this seems to be required to make Keras models play nicely with threads
        self.model._make_predict_function()
        logger.info('Loaded model: {}'.format(self.model.name))
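`_make_predict_function()` builds the predict graph eagerly so the loaded model can later be called from worker threads; a hedged usage sketch (the `ModelWrapper` class name and the input shape are placeholders, not taken from the original):

import threading
import numpy as np

wrapper = ModelWrapper()  # hypothetical name for the class defined above

def worker():
    x = np.zeros((1, 224, 224, 3))  # illustrative input shape
    print(wrapper.model.predict(x))

t = threading.Thread(target=worker)
t.start()
t.join()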
Example #15
def new_session():
    if K.backend() == 'tensorflow':  # pragma: no cover
        import tensorflow as tf
        K.clear_session()
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        K.set_session(session)
Example #16
def clear_session():
    try:
        K.clear_session()
        K.get_session().close()
        cfg = K.tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        K.set_session(K.tf.Session(config=cfg))
    except AttributeError as e:
        print('Could not clear session. Maybe you are using Theano backend?')
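`K.tf.ConfigProto` and `K.set_session` only exist in the TF1-era multi-backend Keras; a rough `tf.keras` / TF2 equivalent (an assumption, not part of the snippet above) would be:

import tensorflow as tf

def clear_session_tf2():
    tf.keras.backend.clear_session()
    # Memory growth must be configured before the GPUs are first initialized.
    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)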
Example #17
    def model_eval(self, model, X, y):
        '''
        Evaluate the trained model.
        :param numpy.array X: list of images used for evaluation
        :param numpy.array y: list of classification labels for the evaluation images
        '''
        score = model.evaluate(X, y)
        print('loss=', score[0])
        print('accuracy=', score[1])
        backend.clear_session()
Example #18
def onExit():
    global dlib_detectors
    global keras_model
    
    if keras_model is not None:
        del keras_model
        K.clear_session()
        
    for detector in dlib_detectors:
        del detector
Example #19
def build_part1_RNN(window_size, lstm_size=5):
    # Clear session before creating a new model
    K.clear_session()
    # inputs = Input(shape=(window_size, 1))
    # _lstm = LSTM(lstm_size)(inputs)
    # outputs = Dense(1)(_lstm)
    # return Model(inputs, outputs)
    model = Sequential()
    model.add(LSTM(lstm_size, input_shape=(window_size, 1)))
    model.add(Dense(1))
    return model
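A hedged usage sketch on a toy series (the window size, data, and training settings are illustrative, not from the original):

import numpy as np

window_size = 7
series = np.sin(np.linspace(0, 10, 200))
X = np.array([series[i:i + window_size]
              for i in range(len(series) - window_size)])
X = X.reshape(-1, window_size, 1)
y = series[window_size:]

model = build_part1_RNN(window_size)
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X, y, epochs=5, batch_size=32, verbose=0)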
Example #20
def set_keras_backend(backend):
    if K.backend() != backend:
        os.environ["KERAS_BACKEND"] = backend
        importlib.reload(K)
        assert K.backend() == backend
    if backend == "tensorflow":
        K.get_session().close()
        cfg = K.tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        K.set_session(K.tf.Session(config=cfg))
        K.clear_session()
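A hypothetical call site: the backend must be switched before any model is constructed.

set_keras_backend("tensorflow")

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_shape=(4,))])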
Example #21
    def keras_get_model(self):
        # keras.backend.tensorflow_backend.clear_session()
        backendK.clear_session()
        # if settings.GPU_FLAG == True:
        #     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
        #     sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        #     backendK.set_session(sess)

        try:
            self.model = keras.models.load_model(self.last_chk_path)
            logging.info("Train Restored checkpoint from:" + self.last_chk_path)
        except Exception as e:
            logging.info("None to restore checkpoint. Initializing variables instead." + self.last_chk_path)
            logging.info(e)

            if self.optimizer == 'sgd':
                self.optimizer = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
            elif self.optimizer == 'rmsprop':
                self.optimizer = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=1e-6)
            elif self.optimizer == 'adagrad':
                self.optimizer = optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=1e-6)
            elif self.optimizer == 'adadelta':
                self.optimizer = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=1e-6)
            elif self.optimizer == 'adam':
                self.optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-6)
                # self.optimizer = optimizers.Adam(lr=self.lr_schedule(0))
            elif self.optimizer == 'adamax':
                self.optimizer = optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=1e-6)
            elif self.optimizer == 'nadam':
                self.optimizer = optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

            if self.net_type == 'inceptionv4':
                # self.labels_cnt = 1001
                self.model = inception_v4_model(self.labels_cnt, 0.2, self.pretrain_model_path)
            # elif self.net_type == 'nasnet':
            #     self.model = NASNetLarge(input_shape=(331, 331, 3))
            elif self.net_type == 'resnet':
                numoutputs = self.netconf["config"]["layeroutputs"]

                if numoutputs == 18:
                    self.model = resnet.ResnetBuilder.build_resnet_18((self.channel, self.x_size, self.y_size), self.labels_cnt)
                elif numoutputs == 34:
                    self.model = resnet.ResnetBuilder.build_resnet_34((self.channel, self.x_size, self.y_size), self.labels_cnt)
                elif numoutputs == 50:
                    self.model = resnet.ResnetBuilder.build_resnet_50((self.channel, self.x_size, self.y_size), self.labels_cnt)
                elif numoutputs == 101:
                    self.model = resnet.ResnetBuilder.build_resnet_101((self.channel, self.x_size, self.y_size), self.labels_cnt)
                elif numoutputs == 152:
                    self.model = resnet.ResnetBuilder.build_resnet_152((self.channel, self.x_size, self.y_size), self.labels_cnt)

            # if settings.GPU_FLAG == True:
            #     self.model = multi_gpu_model(self.model, gpus=1)
            self.model.compile(loss='categorical_crossentropy', optimizer=self.optimizer, metrics=['accuracy'])
Example #22
def multi_gpu_application_np_array_benchmark():
    print('####### Xception benchmark - np i/o')
    model_cls = keras.applications.Xception

    num_samples = 1000
    height = 224
    width = 224
    num_classes = 1000
    epochs = 4
    batch_size = 40
    x = np.random.random((num_samples, height, width, 3))
    y = np.random.random((num_samples, num_classes))

    # Baseline
    model = model_cls(weights=None,
                      input_shape=(height, width, 3),
                      classes=num_classes)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop')

    # Training
    start_time = time.time()
    model.fit(x, y, epochs=epochs)
    total_time = time.time() - start_time
    print('baseline training:', total_time)

    # Inference
    start_time = time.time()
    model.predict(x)
    total_time = time.time() - start_time
    print('baseline inference:', total_time)

    for i in range(8, 9):
        K.clear_session()
        with tf.device('/cpu:0'):
            model = model_cls(weights=None,
                              input_shape=(height, width, 3),
                              classes=num_classes)
        parallel_model = multi_gpu_model(model, gpus=i)
        parallel_model.compile(loss='categorical_crossentropy',
                               optimizer='rmsprop')

        start_time = time.time()
        parallel_model.fit(x, y, epochs=epochs, batch_size=batch_size)
        total_time = time.time() - start_time
        print('%d gpus training:' % i, total_time)

        # Inference
        start_time = time.time()
        parallel_model.predict(x, batch_size=batch_size)
        total_time = time.time() - start_time
        print('%d gpus inference:' % i, total_time)
Example #23
    def gen_IR(self):
        for layer in self.keras_graph.topological_sort:
            current_node = self.keras_graph.get_node(layer)
            node_type = current_node.type

            if hasattr(self, "rename_" + node_type):
                func = getattr(self, "rename_" + node_type)
                func(current_node)
            else:
                print("KerasParser has not supported operator [%s]." % (node_type))
                self.rename_UNKNOWN(current_node)

        _K.clear_session()
Example #24
    def test_deserialization(self):
        model = sequential()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')
        ser_mod = to_dict_w_opt(model)
        custom_objects = {'test_loss': [1, 2]}
        custom_objects = {k: serialize(custom_objects[k])
                          for k in custom_objects}
        model_from_dict_w_opt(ser_mod, custom_objects=custom_objects)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #25
def build_part2_RNN(window_size, num_chars):
    # Clear session before creating a new model
    K.clear_session()
    # inputs = Input(shape=(window_size, num_chars))
    # _lstm = LSTM(200)(inputs)
    # _lstm = Dense(num_chars)(_lstm)
    # outputs = Activation('softmax')(_lstm)
    # return Model(inputs, outputs)
    model = Sequential()
    model.add(LSTM(200, input_shape=(window_size, num_chars)))
    model.add(Dense(num_chars))
    model.add(Activation('softmax'))
    return model
Example #26
    def test_predict(self, get_model):
        """Test to predict using the backend"""
        data, data_val = make_data(train_samples, test_samples)
        model = get_model()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')

        expe = Experiment(model)
        expe.fit([data], [data_val])
        KTB.predict(expe.model_dict, [data['X']])

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Example #27
def predict(args):
    makedirs("ans")
    with open("conf/fea.yaml") as fin:
        cfg = yaml.load(fin)[args.p]
        begin, end = map(int, cfg["test"].split("-"))
    with TimeLog("load data"):
        data = read_data(cfg["data"], begin, end)
    if not args.m:
        args.m = args.p
    model = load_model("model/" + args.m) #, model_file="e3.hdf5")

    pred = model.predict(data.fea, batch_size=1024, verbose=1)
    gen_ans(pred, data, "ans/" + args.m)
    K.clear_session()
Example #28
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # get output of second last layers and create bottleneck model of it
    out1=model_body.layers[246].output
    out2=model_body.layers[247].output
    out3=model_body.layers[248].output
    bottleneck_model = Model([model_body.input, *y_true], [out1, out2, out3])

    # create last layer model of last layers from yolo model
    in0 = Input(shape=bottleneck_model.output[0].shape[1:].as_list()) 
    in1 = Input(shape=bottleneck_model.output[1].shape[1:].as_list())
    in2 = Input(shape=bottleneck_model.output[2].shape[1:].as_list())
    last_out0=model_body.layers[249](in0)
    last_out1=model_body.layers[250](in1)
    last_out2=model_body.layers[251](in2)
    model_last=Model(inputs=[in0, in1, in2], outputs=[last_out0, last_out1, last_out2])
    model_loss_last =Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_last.output, *y_true])
    last_layer_model = Model([in0,in1,in2, *y_true], model_loss_last)

    
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model, bottleneck_model, last_layer_model
Example #29
    def inference(cls, architecture, path, image_path):
        if cls.sanity_check(architecture):
            model = cls.architecture_map[architecture]()
            import numpy as np
            func = TestKit.preprocess_func['keras'][architecture]
            img = func(image_path)
            img = np.expand_dims(img, axis=0)
            predict = model.predict(img)
            predict = np.squeeze(predict)
            K.clear_session()
            del model
            return predict

        else:
            return None
Example #30
    def download(cls, architecture, path="./"):
        if cls.sanity_check(architecture):
            output_filename = path + 'imagenet_{}.h5'.format(architecture)
            if os.path.exists(output_filename) == False:
                model = cls.architecture_map[architecture]()
                model.save(output_filename)
                print("Keras model {} is saved in [{}]".format(architecture, output_filename))
                K.clear_session()
                del model
                return output_filename

            else:
                print("File [{}] existed, skip download.".format(output_filename))
                return output_filename
        else:
            return None
Example #31
def _run_for_all(catalog, entity, threshold, name_rule, upload, sandbox,
                 dir_io, join_method):
    """
    Runs the `linking` procedure using all available classifiers. Joins the results using
    `join_method`
    """
    assert join_method[0] in constants.SC_AVAILABLE_JOIN, (
        'The provided join method needs to be one of: ' +
        str(constants.SC_AVAILABLE_JOIN))

    assert join_method[1] in constants.SC_AVAILABLE_COMBINE, (
        'The provided combine method needs to be one of: ' +
        str(constants.SC_AVAILABLE_COMBINE))

    # ensure that models for all classifiers exist, and directly get the model
    # and results path
    available_classifiers = []
    for classifier_name in list(set(constants.CLASSIFIERS.values())):
        model_path, result_path = _handle_io(classifier_name, catalog, entity,
                                             dir_io)
        # Exit if the model file doesn't exist
        if model_path is None:
            sys.exit(1)

        LOGGER.debug('Loading %s classifier ..', classifier_name)

        available_classifiers.append(
            (classifier_name, joblib.load(model_path), result_path))

    rl.set_option(*constants.CLASSIFICATION_RETURN_SERIES)

    for (
            wd_chunk,
            target_chunk,
            feature_vectors,
    ) in _classification_set_generator(catalog, entity, dir_io):
        # predict the current chunk with all classifiers
        for classifier_name, classifier, result_path in available_classifiers:
            LOGGER.info('Classifying chunk with classifier: %s',
                        classifier_name)

            # The classification set must have the same feature space
            # as the training one
            _add_missing_feature_columns(classifier, feature_vectors)

            predictions = (
                # LSVM doesn't support probability scores
                classifier.predict(feature_vectors) if isinstance(
                    classifier, rl.SVMClassifier) else
                classifier.prob(feature_vectors))

            predictions = _apply_linking_rules(name_rule, predictions,
                                               target_chunk, wd_chunk)

            # Threshold will be applied later, after joining
            (_get_unique_predictions_above_threshold(predictions,
                                                     0.0).to_csv(result_path,
                                                                 mode='a',
                                                                 header=False))

    # Once we have all the classification sets we can proceed to mix them
    # as desired
    all_results = []
    for _, _, result_path in available_classifiers:
        all_results.append(
            (pd.read_csv(result_path,
                         header=None,
                         names=['qid', 'tid',
                                'prediction']).set_index(['qid', 'tid'])))

    LOGGER.info(
        "Joining the results of the classifications using the '%s' method",
        join_method,
    )

    how_to_join, how_to_rem_duplicates = join_method

    # Now join the dataframes using the chosen method
    merged_results: pd.DataFrame
    if how_to_join == constants.SC_UNION:
        merged_results = ensembles.join_dataframes_by_union(all_results)

    elif how_to_join == constants.SC_INTERSECTION:
        merged_results = ensembles.join_dataframes_by_intersection(all_results)

    # and then proceed to deal with duplicates. This step also removes entries under the
    # specified threshold
    if how_to_rem_duplicates == constants.SC_AVERAGE:
        merged_results = ensembles.remove_duplicates_by_averaging(
            merged_results, threshold)

    elif how_to_rem_duplicates == constants.SC_VOTING:
        merged_results = ensembles.remove_duplicates_by_majority_vote(
            merged_results, threshold)

    merged_results = merged_results['prediction']  # get a pd.Series

    result_path = os.path.join(
        dir_io,
        constants.LINKER_RESULT_JOINED.format(catalog, entity, how_to_join,
                                              how_to_rem_duplicates),
    )

    # Delete existing result file,
    # otherwise the current output would be appended to it
    if os.path.isfile(result_path):
        LOGGER.warning("Will delete old output file found at '%s' ...",
                       result_path)
        os.remove(result_path)

    merged_results.to_csv(result_path, mode='a', header=False)

    if upload:
        _upload(merged_results, 0, catalog, entity, sandbox)

    K.clear_session()  # Clear the TensorFlow graph
Example #32
    def tearDown(self):
        k.clear_session()
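A hedged sketch of the enclosing test class (the `unittest.TestCase` base and the `k` alias for `keras.backend` are assumptions):

import unittest
from keras import backend as k

class KerasModelTest(unittest.TestCase):
    def tearDown(self):
        # Release the graph/session that the test built.
        k.clear_session()

    def test_builds_model(self):
        from keras.models import Sequential
        from keras.layers import Dense
        model = Sequential([Dense(2, input_shape=(3,))])
        self.assertEqual(len(model.layers), 1)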
Example #33
def create_model(input_shape,
                 anchors,
                 num_classes,
                 load_pretrained=True,
                 freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (160, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    # get output of second last layers and create bottleneck model of it
    out1 = model_body.layers[246].output
    out2 = model_body.layers[247].output
    out3 = model_body.layers[248].output
    bottleneck_model = Model([model_body.input, *y_true], [out1, out2, out3])

    # create last layer model of last layers from yolo model
    in0 = Input(shape=bottleneck_model.output[0].shape[1:].as_list())
    in1 = Input(shape=bottleneck_model.output[1].shape[1:].as_list())
    in2 = Input(shape=bottleneck_model.output[2].shape[1:].as_list())
    last_out0 = model_body.layers[249](in0)
    last_out1 = model_body.layers[250](in1)
    last_out2 = model_body.layers[251](in2)
    model_last = Model(inputs=[in0, in1, in2],
                       outputs=[last_out0, last_out1, last_out2])
    model_loss_last = Lambda(yolo_loss,
                             output_shape=(1, ),
                             name='yolo_loss',
                             arguments={
                                 'anchors': anchors,
                                 'num_classes': num_classes,
                                 'ignore_thresh': 0.5
                             })([*model_last.output, *y_true])
    last_layer_model = Model([in0, in1, in2, *y_true], model_loss_last)

    model_loss = Lambda(yolo_loss,
                        output_shape=(1, ),
                        name='yolo_loss',
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.5
                        })([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model, bottleneck_model, last_layer_model
Example #34
def TSTR_mnist(identifier,
               epoch,
               generate=True,
               duplicate_synth=1,
               vali=True,
               CNN=False,
               reverse=False):
    """
    Either load or generate synthetic training, real test data...
    Load synthetic training, real test data, do multi-class SVM
    (basically just this: http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html)

    If reverse = True: do TRTS
    """
    print('Running TSTR on', identifier, 'at epoch', epoch)
    if vali:
        test_set = 'vali'
    else:
        test_set = 'test'
    if generate:
        data = np.load('./experiments/data/' + identifier + '.data.npy').item()
        samples = data['samples']
        train_X = samples['train']
        test_X = samples[test_set]
        labels = data['labels']
        train_Y = labels['train']
        test_Y = labels[test_set]
        # now sample from the model
        synth_Y = np.tile(train_Y, [duplicate_synth, 1])
        synth_X = model.sample_trained_model(identifier,
                                             epoch,
                                             num_samples=synth_Y.shape[0],
                                             C_samples=synth_Y)
        # for use in TRTS
        synth_testX = model.sample_trained_model(identifier,
                                                 epoch,
                                                 num_samples=test_Y.shape[0],
                                                 C_samples=test_Y)
        synth_data = {
            'samples': synth_X,
            'labels': synth_Y,
            'test_samples': synth_testX,
            'test_labels': test_Y
        }
        np.save(
            './experiments/tstr/' + identifier + '_' + str(epoch) +
            '.data.npy', synth_data)
    else:
        print('Loading synthetic data from pre-sampled model')
        exp_data = np.load('./experiments/tstr/' + identifier + '_' +
                           str(epoch) + '.data.npy').item()
        test_X, test_Y = exp_data['test_data'], exp_data['test_labels']
        train_X, train_Y = exp_data['train_data'], exp_data['train_labels']
        synth_X, synth_Y = exp_data['synth_data'], exp_data['synth_labels']
    if reverse:
        which_setting = 'trts'
        print('Swapping synthetic test set in for real, to do TRTS!')
        test_X = synth_testX
    else:
        print('Doing normal TSTR')
        which_setting = 'tstr'
    # make classifier
    if not CNN:
        model_choice = 'RF'
        # if multivariate, reshape
        if len(test_X.shape) == 3:
            test_X = test_X.reshape(test_X.shape[0], -1)
        if len(train_X.shape) == 3:
            train_X = train_X.reshape(train_X.shape[0], -1)
        if len(synth_X.shape) == 3:
            synth_X = synth_X.reshape(synth_X.shape[0], -1)
        # if one hot, fix
        if len(synth_Y.shape) > 1 and not synth_Y.shape[1] == 1:
            synth_Y = np.argmax(synth_Y, axis=1)
            train_Y = np.argmax(train_Y, axis=1)
            test_Y = np.argmax(test_Y, axis=1)
    # random forest
    #synth_classifier = SVC(gamma=0.001)
    #real_classifier = SVC(gamma=0.001)
        synth_classifier = RandomForestClassifier(n_estimators=500)
        real_classifier = RandomForestClassifier(n_estimators=500)
        # fit
        real_classifier.fit(train_X, train_Y)
        synth_classifier.fit(synth_X, synth_Y)
        # test on real
        synth_predY = synth_classifier.predict(test_X)
        real_predY = real_classifier.predict(test_X)
    else:
        model_choice = 'CNN'
        synth_predY = train_CNN(synth_X, synth_Y, samples['vali'],
                                labels['vali'], test_X)
        clear_session()
        real_predY = train_CNN(train_X, train_Y, samples['vali'],
                               labels['vali'], test_X)
        clear_session()
        # CNN setting is all 'one-hot'
        test_Y = np.argmax(test_Y, axis=1)
        synth_predY = np.argmax(synth_predY, axis=1)
        real_predY = np.argmax(real_predY, axis=1)

    # report on results
    synth_prec, synth_recall, synth_f1, synth_support = precision_recall_fscore_support(
        test_Y, synth_predY, average='weighted')
    synth_accuracy = accuracy_score(test_Y, synth_predY)
    synth_auprc = 'NaN'
    synth_auroc = 'NaN'
    synth_scores = [
        synth_prec, synth_recall, synth_f1, synth_accuracy, synth_auprc,
        synth_auroc
    ]
    real_prec, real_recall, real_f1, real_support = precision_recall_fscore_support(
        test_Y, real_predY, average='weighted')
    real_accuracy = accuracy_score(test_Y, real_predY)
    real_auprc = 'NaN'
    real_auroc = 'NaN'
    real_scores = [
        real_prec, real_recall, real_f1, real_accuracy, real_auprc, real_auroc
    ]

    all_scores = synth_scores + real_scores

    if vali:
        report_file = open(
            './experiments/tstr/vali.' + which_setting + '_report.v3.csv', 'a')
        report_file.write('mnist,' + identifier + ',' + model_choice + ',' +
                          str(epoch) + ',' + ','.join(map(str, all_scores)) +
                          '\n')
        report_file.close()
    else:
        report_file = open(
            './experiments/tstr/' + which_setting + '_report.v3.csv', 'a')
        report_file.write('mnist,' + identifier + ',' + model_choice + ',' +
                          str(epoch) + ',' + ','.join(map(str, all_scores)) +
                          '\n')
        report_file.close()
        # visualise results
        try:
            plotting.view_mnist_eval(identifier + '_' + str(epoch), train_X,
                                     train_Y, synth_X, synth_Y, test_X, test_Y,
                                     synth_predY, real_predY)
        except ValueError:
            print('PLOTTING ERROR')
            pdb.set_trace()
    print(classification_report(test_Y, synth_predY))
    print(classification_report(test_Y, real_predY))
    return synth_f1, real_f1
Example #35
def main():
    global NUM_PARENT_NETWORKS
    global CHILDREN_PER_PARENT
    global NUM_MUTATION_WEIGHTS
    global MUTATION_FACTOR

    env = gym.make('CartPole-v1')
    env.reset()

    # time.sleep(0.5)

    gym_img = env.render(mode='rgb_array')  #current_window_img(WINDOW_OFFSET)
    gym_img = rgb2gray(gym_img)
    gym_img = gym_img[150:350, 200:400]
    gym_img = resize(gym_img, (25, 25))
    # exit()
    gym_img = gym_img.astype('float32') / 255.0

    img_tensor = np.array(gym_img, dtype='float')
    img_tensor = img_tensor.reshape(
        (img_tensor.shape[0], img_tensor.shape[1], 1))

    for _ in range(NUM_PREVIOUS_USING_STATES):
        img_tensor = np.append(img_tensor, img_tensor[:, :, 0:1], axis=2)

    for i in range(NUM_PARENT_NETWORKS):
        nn = generate_model(img_tensor.shape)
        nn.save('nn' + str(i) + '.h5')
        K.clear_session()
        gc.collect()

    K.clear_session()
    gc.collect()

    # nnetworks = [generate_model(img_tensor.shape)
    #              for i in range(NUM_PARENT_NETWORKS)]

    nn = models.load_model('nn0.h5')

    layers_info = []
    for i in range(len(nn.layers)):
        layers_info.append(Weights(nn.layers[i]))

    max_reward = 0
    for gen_idx in range(NUM_GENERATION):
        print('Generation {}'.format(gen_idx))
        with open('GAConfig.txt') as cfg:
            NUM_PARENT_NETWORKS = int(cfg.readline())
            CHILDREN_PER_PARENT = int(cfg.readline())
            NUM_MUTATION_WEIGHTS = int(cfg.readline())
            MUTATION_FACTOR = np.float32(float(cfg.readline()))
            print(NUM_PARENT_NETWORKS, CHILDREN_PER_PARENT,
                  NUM_MUTATION_WEIGHTS, MUTATION_FACTOR)

        for net_idx in range(NUM_PARENT_NETWORKS):
            for child_idx in range(CHILDREN_PER_PARENT):
                partner_idx = get_partner_idx(net_idx, NUM_PARENT_NETWORKS)
                nn_parent1 = models.load_model('nn' + str(net_idx) + '.h5')
                nn_parent2 = models.load_model('nn' + str(partner_idx) + '.h5')
                child_model = generate_child(nn_parent1, nn_parent2,
                                             img_tensor.shape, layers_info)
                safe_idx = NUM_PARENT_NETWORKS + net_idx * CHILDREN_PER_PARENT + child_idx
                child_model.save('nn' + str(safe_idx) + '.h5')
                K.clear_session()
                gc.collect()
            K.clear_session()
            gc.collect()
            # nnetworks.append(child_model)

        num_networks = NUM_PARENT_NETWORKS + CHILDREN_PER_PARENT * NUM_PARENT_NETWORKS

        rewards = [0 for i in range(num_networks)]
        for network_idx in range(num_networks):
            current_nn = models.load_model('nn' + str(network_idx) + '.h5')
            run_results = np.array([])
            for start_id in range(NUM_STARTS_FOR_AVRG):
                reward = 0
                env.reset()

                prev_states = np.zeros(
                    (img_tensor.shape[0], img_tensor.shape[1],
                     img_tensor.shape[2] - 1))
                # for i in range(img_tensor.shape[2] - 1):
                #     prev_states[:,:,i:i+1] = img_tensor[:,:,0:1]

                while reward < MAX_REWARD:
                    env.render()
                    gym_img = env.render(
                        mode='rgb_array')  #current_window_img(WINDOW_OFFSET)
                    gym_img = rgb2gray(gym_img)
                    gym_img = gym_img[150:350, 200:400]
                    gym_img = resize(gym_img, (25, 25))
                    gym_img = gym_img.astype('float32') / 255.0

                    gym_tensor = np.array(gym_img, dtype='float')
                    gym_tensor = gym_tensor.reshape(
                        (gym_tensor.shape[0], gym_tensor.shape[1], 1))
                    for i in range(NUM_PREVIOUS_USING_STATES):
                        gym_tensor = np.append(gym_tensor,
                                               prev_states[:, :, i:i + 1],
                                               axis=2)

                    gym_tensor = np.expand_dims(gym_tensor, axis=0)

                    predict = current_nn.predict(gym_tensor)
                    action = 0 if predict[0][0] < 0.5 else 1
                    _, _, done, _ = env.step(action)
                    reward += 1

                    if done:
                        run_results = np.append(run_results, reward)
                        break
                    else:
                        # if reward % 2 == 0:
                        update_prev_states(prev_states, gym_tensor[:, :, :,
                                                                   0:1])

            rewards[network_idx] = int(np.mean(run_results))
            if max_reward < max(rewards):
                max_reward = max(rewards)
                with open("max_reward.txt", "w") as f:
                    f.writelines(['MAX REWARD COMMON: {}'.format(max_reward)])
                current_nn.save('best_network.h5')
            print('Network {}: {}'.format(network_idx, rewards[network_idx]))

        print('-' * 40)
        print('MAX REWARD CURRENT: {}'.format(max(rewards)))
        print('MAX REWARD COMMON: {}'.format(max_reward))
        print('-' * 40)

        nnetworks = selection(num_networks, rewards, NUM_PARENT_NETWORKS,
                              RANDOM_SELECTED_NETWORKS)

        # for i in range(len(nnetworks)):
        #     nnetworks[i].save('tmp'+str(i) + '.h5')

        # nnetworks.clear()

        K.clear_session()
        gc.collect()
Example #36
def main(args):
    try:
        if not args.model_load:
            raise ValueError()
        audio_dir = args.audio_dir

        print "\nReading test data: "
        _, df = combine_all_wavs_and_trans_from_csvs(audio_dir)

        batch_size = args.batch_size
        batch_index = args.batch_index

        mfcc_features = args.mfccs
        n_mels = args.mels
        frequency = 16           # Sampling rate of data in khz (LibriSpeech is 16khz)

        # Training data_params:
        model_load = args.model_load
        load_multi = args.load_multi

        # Sets the full dataset in audio_dir to be available through data_generator
        # The data_generator doesn't actually load the audio files until they are requested through __getitem__()
        epoch_length = 0

        # Load trained model
        # When loading custom objects, Keras needs to know where to find them.
        # The CTC lambda is a dummy function
        custom_objects = {'clipped_relu': models.clipped_relu,
                          '<lambda>': lambda y_true, y_pred: y_pred}

        # When loading a parallel model saved *while* running on GPU, use load_multi
        if load_multi:
            model = models.load_model(model_load, custom_objects=custom_objects)
            model = model.layers[-2]
            print "\nLoaded existing model: ", model_load

        # Load single GPU/CPU model or model saved *after* finished training
        else:
            model = models.load_model(model_load, custom_objects=custom_objects)
            print "\nLoaded existing model: ", model_load

        # Dummy loss-function to compile model, actual CTC loss-function defined as a lambda layer in model
        loss = {'ctc': lambda y_true, y_pred: y_pred}

        model.compile(loss=loss, optimizer='Adam')

        feature_shape = model.input_shape[0][2]

        # Model feature type
        if not args.feature_type:
            if feature_shape == 26:
                feature_type = 'mfcc'
            else:
                feature_type = 'spectrogram'
        else:
            feature_type = args.feature_type

        print "Feature type: ", feature_type

        # Data generation parameters
        data_params = {'feature_type': feature_type,
                       'batch_size': batch_size,
                       'frame_length': 20 * frequency,
                       'hop_length': 10 * frequency,
                       'mfcc_features': mfcc_features,
                       'n_mels': n_mels,
                       'epoch_length': epoch_length,
                       'shuffle': False
                       }

        # Data generators for training, validation and testing data
        data_generator = DataGenerator(df, **data_params)

        # Print model summary
        model.summary()

        # Creates a test function that takes preprocessed sound input and outputs predictions
        # Used to calculate WER while training the network
        input_data = model.get_layer('the_input').input
        y_pred = model.get_layer('ctc').input[0]
        test_func = K.function([input_data], [y_pred])

        if args.calc_wer:
            print "\n - Calculation WER on ", audio_dir
            wer = calc_wer(test_func, data_generator)
            print "Average WER: ", wer[1]

        predictions = predict_on_batch(data_generator, test_func, batch_index)
        print "\n - Predictions from batch index: ", batch_index, "\nFrom: ", audio_dir, "\n"
        for i in predictions:
            print "Original: ", i[0]
            print "Predicted: ", i[1], "\n"

    except (Exception, StandardError, GeneratorExit, SystemExit) as e:
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(e).__name__, e.args)
        print "e.args: ", e.args
        print message

    finally:
        # Clear memory
        K.clear_session()
Example #37
def main(argv=None):
    '''
    '''
    main.__doc__ = __doc__
    argv = sys.argv if argv is None else sys.argv.extend(argv)
    desc = main.__doc__  # .format(os.path.basename(__file__))
    # CLI parser
    args = parser_(desc)
    mgpu = 0 if getattr(args, 'mgpu', None) is None else args.mgpu

    checkpt = getattr(args, 'checkpt', None)
    checkpt_flag = False if checkpt is None else True
    filepath = checkpt
    # print('CHECKPT:', checkpt)

    gdev_list = get_available_gpus(mgpu or 1)
    ngpus = len(gdev_list)

    batch_size_1gpu = 32
    batch_size = batch_size_1gpu * ngpus
    num_classes = 1000
    epochs = args.epochs
    data_augmentation = args.aug

    logdevp = args.logdevp

    datadir = getattr(args, 'datadir', None)

    # The data, shuffled and split between train and test sets:
    (x_train, y_train), (x_test,
                         y_test) = synthesize_imagenet_dataset(num_classes)
    train_samples = x_train.shape[0]
    test_samples = y_test.shape[0]
    steps_per_epoch = train_samples // batch_size
    print('train_samples:', train_samples)
    print('batch_size:', batch_size)
    print('steps_per_epoch:', steps_per_epoch)
    # validations_steps = test_samples // batch_size
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # The capacity variable controls the maximum queue size
    # allowed when prefetching data for training.
    capacity = 10000

    # min_after_dequeue is the minimum number elements in the queue
    # after a dequeue, which ensures sufficient mixing of elements.
    # min_after_dequeue = 3000

    # If `enqueue_many` is `False`, `tensors` is assumed to represent a
    # single example.  An input tensor with shape `[x, y, z]` will be output
    # as a tensor with shape `[batch_size, x, y, z]`.
    #
    # If `enqueue_many` is `True`, `tensors` is assumed to represent a
    # batch of examples, where the first dimension is indexed by example,
    # and all members of `tensors` should have the same size in the
    # first dimension.  If an input tensor has shape `[*, x, y, z]`, the
    # output will have shape `[batch_size, x, y, z]`.
    # enqueue_many = True

    # Force input pipeline to CPU:0 to avoid data operations ending up on GPU
    # and resulting in a slow down for multigpu case due to comm overhead.
    with tf.device('/cpu:0'):
        # if no augmentation can go directly from numpy arrays
        # x_train_batch, y_train_batch = tf.train.shuffle_batch(
        #     tensors=[x_train, y_train],
        #     # tensors=[x_train, y_train.astype(np.int32)],
        #     batch_size=batch_size,
        #     capacity=capacity,
        #     min_after_dequeue=min_after_dequeue,
        #     enqueue_many=enqueue_many,
        #     num_threads=8)

        # NOTE: This bakes the whole dataset into the TF graph and for larger
        # datasets it fails on "ValueError: GraphDef cannot be larger than 2GB".
        # TODO: Load the a large dataset via queue from RAM/disk.

        input_images = tf.constant(x_train.reshape(train_samples, -1))
        print('train_samples', train_samples)
        print('input_images', input_images.shape)
        image, label = tf.train.slice_input_producer([input_images, y_train],
                                                     shuffle=True)
        # If using num_epochs=epochs have to:
        #     sess.run(tf.local_variables_initializer())
        #     and maybe also: sess.run(tf.global_variables_initializer())
        image = tf.reshape(image, x_train.shape[1:])
        print('image', image.shape)

        test_images = tf.constant(x_test.reshape(test_samples, -1))
        test_image, test_label = tf.train.slice_input_producer(
            [test_images, y_test], shuffle=False)
        test_image = tf.reshape(test_image, x_train.shape[1:])

        if data_augmentation:
            print('Using real-time data augmentation.')
            # Randomly flip the image horizontally.
            distorted_image = tf.image.random_flip_left_right(image)

            # Because these operations are not commutative, consider
            # randomizing the order their operation.
            # NOTE: since per_image_standardization zeros the mean and
            # makes the stddev unit, this likely has no effect see
            # tensorflow#1458.
            distorted_image = tf.image.random_brightness(distorted_image,
                                                         max_delta=63)
            distorted_image = tf.image.random_contrast(distorted_image,
                                                       lower=0.2,
                                                       upper=1.8)

            # Subtract off the mean and divide by the variance of the
            # pixels.
            image = tf.image.per_image_standardization(distorted_image)

            # Do this for testing as well if standardizing
            test_image = tf.image.per_image_standardization(test_image)

        # Use tf.train.batch if slice_input_producer shuffle=True,
        # otherwise use tf.train.shuffle_batch. Not sure which way is faster.
        x_train_batch, y_train_batch = tf.train.batch([image, label],
                                                      batch_size=batch_size,
                                                      capacity=capacity,
                                                      num_threads=8)

        print('x_train_batch:', x_train_batch.shape)

        # x_train_batch, y_train_batch = tf.train.shuffle_batch(
        #     tensors=[image, label],
        #     batch_size=batch_size,
        #     capacity=capacity,
        #     min_after_dequeue=min_after_dequeue,
        #     num_threads=8)

        x_test_batch, y_test_batch = tf.train.batch(
            [test_image, test_label],
            # TODO: shouldn't it be: batch_size=batch_size???
            batch_size=train_samples,
            capacity=capacity,
            num_threads=8,
            name='test_batch',
            shared_name='test_batch')

    x_train_input = KL.Input(tensor=x_train_batch)

    print('x_train_input', x_train_input)

    gauge = SamplesPerSec(batch_size)
    callbacks = [gauge]

    if _DEVPROF or logdevp:  # or True:
        # Setup Keras session using Tensorflow
        config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=True)
        # config.gpu_options.allow_growth = True
        tfsess = tf.Session(config=config)
        KB.set_session(tfsess)

    model_init = make_model(x_train_input, num_classes)
    x_train_out = model_init.output
    # model_init.summary()

    lr = 0.0001 * ngpus
    if ngpus > 1:
        model = make_parallel(model_init, gdev_list)
    else:
        # Must re-instantiate model per API below otherwise doesn't work.
        model_init = Model(inputs=[x_train_input], outputs=[x_train_out])
        model = model_init

    opt = RMSprop(lr=lr, decay=1e-6)
    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'],
                  target_tensors=[y_train_batch])

    print_mgpu_modelsummary(model)  # will print non-mgpu model as well

    if checkpt_flag:
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='acc',
                                     verbose=1,
                                     save_best_only=True)
        callbacks.append(checkpoint)

    # Start the queue runners once, under a coordinator so the threads can be
    # stopped cleanly after training.
    sess = KB.get_session()
    # sess.run([tf.local_variables_initializer(),
    #           tf.global_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Fit the model using data from the queue-fed input tensors.

    val_in_train = False  # validation from the test-batch tensors is not wired into this fit call

    start_time = time.time()
    model.fit(
        # validation_data=(x_test_batch, y_test_batch)
        # if val_in_train else None,  # validation data is not wired in here
        # validation_steps=validation_steps if val_in_train else None,
        validation_steps=None,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=callbacks)
    elapsed_time = time.time() - start_time
    print('[{}] finished in {} ms'.format('TRAINING',
                                          int(elapsed_time * 1000)))
    gauge.print_results()

    weights_file = checkptfile  # './saved_cifar10_wt.h5'
    if not checkpt_flag:  # no ModelCheckpoint callback was used, so save weights manually
        model.save_weights(checkptfile)

    # Clean up the TF session.
    coord.request_stop()
    coord.join(threads)

    KB.clear_session()

    # Second Session. Demonstrate that the model works
    # test_model = make_model(x_test.shape[1:], num_classes,
    #                         weights_file=weights_file)
    test_model = make_model(x_test.shape[1:], num_classes)
    test_model.load_weights(weights_file)
    test_model.compile(loss='categorical_crossentropy',
                       optimizer=opt,
                       metrics=['accuracy'])

    if data_augmentation:
        x_processed = sess.run(x_test_batch)
        y_processed = sess.run(y_test_batch)
        loss, acc = test_model.evaluate(x_processed, y_processed)
    else:
        loss, acc = test_model.evaluate(x_test, y_test)

    print('\nTest loss: {0}'.format(loss))
    print('\nTest accuracy: {0}'.format(acc))
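
# --- Hedged aside (not from the original source): slice_input_producer /
# tf.train.batch pipelines like the one above are deprecated. A minimal
# sketch of the same idea with tf.data, assuming in-memory NumPy arrays
# `x_train`, `y_train` and a `batch_size` as in the snippet:
def make_train_dataset(x_train, y_train, batch_size):
    """Shuffled, augmented, standardized, batched input pipeline."""
    def _augment(image, label):
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_brightness(image, max_delta=63)
        image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
        image = tf.image.per_image_standardization(image)
        return image, label

    ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    ds = ds.shuffle(buffer_size=len(x_train)).repeat()
    ds = ds.map(_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return ds.batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)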
Exemplo n.º 38
0
    def profit(code):
        from keras.models import load_model
        # import tensorflow as tf
        # global graph, model
        # graph = tf.get_default_graph()

        K.clear_session()

        UP_FOLDER = os.path.dirname(os.path.abspath(__file__))
        codelist = os.path.join(UP_FOLDER,
                                'static\\stock\\Data\\codelist-1.xlsx')
        con = os.path.join(UP_FOLDER,
                           'static\\stock\\Data\\stock_notna_ffill.db')
        conn = os.path.join(UP_FOLDER,
                            'static\\stock\\Data\\stock_external_최종.db')

        codelist1 = pd.read_excel(codelist)
        codelist = codelist1[['종목코드', '종목명']]
        name = codelist[codelist['종목코드'] == code]['종목명'].values[0]
        # codelist1 = codelist1['종목코드']
        # codelist1 = codelist1.values.tolist()
        con = sqlite3.connect(con)
        conn = sqlite3.connect(conn)
        df = Post.stock_chart(code)

        Xtest, Ytest, scaler = dataset1W(con, conn, code, look_back=25)
        model7 = os.path.join(UP_FOLDER,
                              'static\\stock\\model1W\\%s_1W.h5' % code)
        model7 = load_model(model7)
        x = Xtest[len(Xtest) - 7:]
        pre = model7.predict(x)
        del model7
        y = Post.stock_chart(code)
        y = y.values.tolist()
        Pre = scaler.inverse_transform(pre)
        past7 = y[7]
        cur7 = y[-1]
        gap = Pre[-1] - Pre[0]
        future7 = cur7 + gap
        future7 = round(future7[0], 0)
        futurerate7 = float(future7 / cur7)
        futurerate7 = round(futurerate7, 3)
        list7 = [float(past7), float(cur7), float(future7)]

        Xtest, Ytest, scaler = dataset15D(con, conn, code, look_back=30)
        model15 = os.path.join(UP_FOLDER,
                               'static\\stock\\model15D\\%s_15D.h5' % code)
        model15 = load_model(model15)
        x = Xtest[len(Xtest) - 15:]
        pre = model15.predict(x)
        del model15
        # Ytest = Ytest.reshape(-1,1)
        # Y = scaler.inverse_transform(Ytest)
        # y = y.values.tolist()
        Pre = scaler.inverse_transform(pre)
        past15 = y[15]
        cur15 = y[-1]
        gap = Pre[-1] - Pre[0]
        future15 = cur15 + gap
        future15 = round(future15[0], 0)
        futurerate15 = float(future15 / cur15)
        futurerate15 = round(futurerate15, 3)
        list15 = [float(past15), float(cur15), float(future15)]

        Xtest, Ytest, scaler = dataset20D(con, conn, code, look_back=60)
        model20 = os.path.join(UP_FOLDER,
                               'static\\stock\\model20D\\%s_20D.h5' % code)
        model20 = load_model(model20)
        x = Xtest[len(Xtest) - 20:]
        pre = model20.predict(x)
        del model20
        # Ytest = Ytest.reshape(-1, 1)
        # Y = scaler.inverse_transform(Ytest)
        # y = y.values.tolist()
        Pre = scaler.inverse_transform(pre)
        past20 = y[20]
        cur20 = y[-1]
        gap = Pre[-1] - Pre[0]
        future20 = cur20 + gap
        future20 = round(future20[0], 0)
        futurerate20 = float(future20 / cur20)
        futurerate20 = round(futurerate20, 3)
        list20 = [float(past20), float(cur20), float(future20)]

        # Xtest, Ytest, scaler = dataset15D(con, conn, code, look_back=30)
        # model15 = os.path.join(UP_FOLDER, 'static\\stock\\model15D\\%s_15D.h5' % code)
        # img1W = os.path.join(UP_FOLDER, 'static\\stock\\preimg\\%s_15D_pre.png' % code)
        # model15 = load_model(model15)
        # x = Xtest[len(Xtest) - 15:]
        # pre = model15.predict(x)
        # Pre = scaler.inverse_transform(pre)
        # gap = Pre[-1] - Pre[0]
        # pre = [df[-1], df[-1] + gap]
        # profit = Pre[-1] - Pre[0]

        # profit_list.append(profit)
        # profit_df = pd.DataFrame(profit_list)
        # profit_df.to_excel("D://lec403//로보플젝데이터//%s_profit.xlsx" % i)

        return futurerate7, list7, futurerate15, list15, futurerate20, list20, name
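
    # --- Hedged aside (not from the original source): the three nearly
    # identical 7/15/20-day blocks above could be factored into one helper.
    # `dataset_fn` stands for dataset1W/dataset15D/dataset20D, `horizon` for
    # 7/15/20, and `y` is the price list already built in profit().
    def _forecast(dataset_fn, look_back, horizon, model_path, con, conn, code, y):
        from keras.models import load_model
        Xtest, _, scaler = dataset_fn(con, conn, code, look_back=look_back)
        model = load_model(model_path)
        pre = scaler.inverse_transform(model.predict(Xtest[-horizon:]))
        del model
        past, cur = y[horizon], y[-1]
        future = round((cur + (pre[-1] - pre[0]))[0], 0)
        rate = round(float(future / cur), 3)
        return rate, [float(past), float(cur), float(future)]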
Exemplo n.º 39
0
            callbacks,
            batch_size=32,
            nbr_type=GP['nbr_type'],
            save_path=GP['save_path'],
            len_molecular_hidden_layers=len_molecular_hidden_layers,
            molecular_nbrs=molecular_nbrs,
            conv_bool=conv_bool,
            full_conv_bool=full_conv_bool,
            type_bool=GP['type_bool'],
            sampling_density=GP['sampling_density'])
        frame_loss, frame_mse = ct.train_ac()
    else:
        frame_mse = []
        frame_loss = []

    return frame_loss, frame_mse


def main():

    gParameters = initialize_parameters()
    run(gParameters)


if __name__ == '__main__':
    main()
    try:
        K.clear_session()
    except AttributeError:  # theano does not have this function
        pass
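
# --- Hedged aside (not from the original source): the try/except above can be
# wrapped in a small helper so every entry point can reuse the same
# backend-safe cleanup.
def safe_clear_session(backend_module):
    """Clear the Keras session if the active backend supports it."""
    try:
        backend_module.clear_session()
    except AttributeError:
        # e.g. the Theano backend has no clear_session()
        pass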
Exemplo n.º 40
0
def mlp(train_data, test_data, train_outcomes, test_outcomes, fold_num,
        results_dir):
    model = Sequential()
    model.add(
        Dense(32,
              activation='relu',
              kernel_constraint=max_norm(),
              input_shape=(train_data.shape[1], )))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(32, kernel_constraint=max_norm(), activation='relu'))
    # model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))

    model_checkpoint = ModelCheckpoint(results_dir + "fold_" + str(fold_num) +
                                       "_best_weights.hdf5",
                                       monitor="categorical_accuracy",
                                       save_best_only=True)

    adam = Adam(lr=0.0002,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=None,
                decay=1e-5,
                amsgrad=False)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    # model.summary()

    train_labels = to_categorical(train_outcomes, num_classes=2)
    test_labels = to_categorical(test_outcomes, num_classes=2)

    class_weights = class_weight.compute_class_weight(
        'balanced', np.unique(train_outcomes), train_outcomes)

    hist = model.fit(train_data,
                     train_labels,
                     batch_size=128,
                     epochs=1200,
                     validation_data=(test_data, test_labels),
                     callbacks=[model_checkpoint],
                     verbose=False,
                     class_weight=class_weights)

    # print(model.metrics_names)

    plt.figure(figsize=(6, 6))
    plt.plot(hist.history['categorical_accuracy'], label='Training')
    plt.plot(hist.history['val_categorical_accuracy'], label='Validation')
    plt.legend(loc='lower right', fontsize=20)
    plt.xlabel('Epoch #', fontsize=20)
    plt.ylabel('% accuracy', fontsize=20)
    plt.savefig(results_dir + 'fold_' + str(fold_num) + '_mlp_accuracy.png',
                dpi=500)

    model.load_weights(results_dir + "fold_" + str(fold_num) +
                       "_best_weights.hdf5")
    model.save(results_dir + 'best_bol_model' + str(fold_num) + '.hdf5')

    deep_probabilities = model.predict_proba(test_data)

    train_labels = to_categorical(train_outcomes, num_classes=2)

    explainer = lime.lime_tabular.LimeTabularExplainer(
        train_data,
        training_labels=train_labels,
        discretize_continuous=True,
        discretizer='quartile',
        class_names=['Inactive', 'Active'])

    lime_type_importance = np.zeros((train_data.shape[1]))

    for i in range(test_data.shape[0]):
        prediction = model.predict(test_data[i, ...][np.newaxis, ...])
        if prediction[0][1] > 0.5:
            prediction = 1
            label = ['Active']
        else:
            prediction = 0
            label = ['Inactive']

        if test_outcomes[i] == prediction:
            exp = explainer.explain_instance(test_data[i, ...],
                                             model.predict_proba,
                                             num_features=10,
                                             labels=[prediction])
            exp.save_to_file(results_dir + 'explanation' + str(fold_num) +
                             '-' + str(i) + '.html')
            important_types = exp.as_map()
            # print('types', important_types)

            fig = exp.as_pyplot_figure(label=prediction)
            plt.tight_layout()
            fig.savefig(results_dir + str(fold_num) + '_' + str(i) +
                        '_explained.png')

            for lesion_type in important_types[prediction]:
                lime_type_importance[lesion_type[0]] += lesion_type[1]

    K.clear_session()

    return deep_probabilities, model, lime_type_importance
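
# --- Hedged aside (not from the original source): fit() returns a History
# object whose per-epoch values live in its .history dict, which is what the
# plotting code above reads. A small reusable sketch of that pattern:
def plot_history_metric(history, metric, out_path):
    """Plot training/validation curves for one metric of a Keras History."""
    plt.figure(figsize=(6, 6))
    plt.plot(history.history[metric], label='Training')
    plt.plot(history.history['val_' + metric], label='Validation')
    plt.legend(loc='lower right', fontsize=20)
    plt.xlabel('Epoch #', fontsize=20)
    plt.ylabel(metric, fontsize=20)
    plt.savefig(out_path, dpi=500)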
Exemplo n.º 41
0
def deep_learning_experiment_vector(param, train, test, label_info):
    nb_class = label_info[0]
    nb_people = label_info[1]
    param.nb_modal = 3

    if param.method == method_select['people']:
        nb_repeat = nb_people
    elif param.method in method_select['repeat']:
        nb_repeat = 20
    elif param.method in method_select["CrossValidation"]:
        nb_repeat = param.collect["CrossValidation"] * 5

    # config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    for repeat in range(nb_repeat):

        print(f"{dt()} :: {repeat+1}/{nb_repeat} experiment progress")

        tartr = train[repeat]
        tarte = test[repeat]

        tr_data = [tartr["data_0"], tartr["data_1"], tartr["data_2"]]
        te_data = [tarte["data_0"], tarte["data_1"], tarte["data_2"]]
        if param.datatype == "type":
            tr_label = tartr["tag"] - 1
            te_label = tarte["tag"] - 1
            nb_class = label_info[0]
        elif param.datatype == "disease":
            tr_label = tartr["tag"]
            te_label = tarte["tag"]
            nb_class = label_info[0]

        cat_tr = preprocessing.to_categorical(tr_label, nb_class)
        cat_te = preprocessing.to_categorical(te_label, nb_class)

        model = model_compactor.model_setting(param, train[repeat],
                                              test[repeat],
                                              [nb_class, nb_people])
        print(f"{dt()} :: MODEL={param.model_name}, METHOD={param.method}")

        log_dir = f"../Log/{param.model_name}_{param.method}"
        # log_dir = f"/home/blackcow/mlpa/workspace/gait-rework/gait-rework/Log/{param.model_name}_{param.method}"

        # tb_hist = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)

        # # model.summary()
        # model_result = model.fit(x=tr_data, y=cat_tr, epochs=param.epochs, batch_size=param.batch_size
        #                          , validation_data=(te_data, cat_te), verbose=2, callbacks=[tb_hist])

        # model_score = model.evaluate(x=te_data, y=cat_te, verbose=0)

        while True:
            x_train1 = list()
            x_train2 = list()
            x_train3 = list()

            y_train = list()

            print(f"total batch : {len(tr_data[0]) // param.batch_size}")
            for i in range(len(tr_data[0]) // param.batch_size):
                x_batch1 = tr_data[0][i * param.batch_size:(i + 1) *
                                      param.batch_size]
                x_batch2 = tr_data[1][i * param.batch_size:(i + 1) *
                                      param.batch_size]
                x_batch3 = tr_data[2][i * param.batch_size:(i + 1) *
                                      param.batch_size]

                x_train1.append(x_batch1)
                x_train2.append(x_batch2)
                x_train3.append(x_batch3)
                y_train.append(cat_tr[i * param.batch_size:(i + 1) *
                                      param.batch_size])

            model.summary()
            optimizer = tf.optimizers.Adam(lr=0.0001)
            loss_object = tf.keras.losses.CategoricalCrossentropy()
            fin_loss_object = tf.keras.losses.CategoricalCrossentropy()

            train_loss = tf.keras.metrics.Mean(name='train_loss')
            train_accuracy = tf.keras.metrics.CategoricalAccuracy(
                name='train_accuracy')

            test_loss = tf.keras.metrics.Mean(name='test_loss')
            test_accuracy = tf.keras.metrics.CategoricalAccuracy(
                name='test_accuracy')

            for epoch in range(param.epochs):

                for step, (x_batch1, x_batch2, x_batch3, y_batch) in enumerate(
                        zip(x_train1, x_train2, x_train3, y_train)):
                    # predicted = model.predict([x_batch1, x_batch2, x_batch3])

                    with tf.GradientTape() as tape:
                        logits = model([x_batch1, x_batch2, x_batch3])

                        loss_val1 = loss_object(y_batch, logits[0])
                        loss_val2 = loss_object(y_batch, logits[1])
                        loss_val3 = loss_object(y_batch, logits[2])

                        # tf.math.add only takes two tensors, so combine the
                        # weighted per-domain losses with tf.add_n instead
                        true_loss = tf.add_n([loss_val1 * 0.3,
                                              loss_val2 * 0.3,
                                              loss_val3 * 0.3])
                        # the merged-output loss below is the one actually optimised
                        true_loss = fin_loss_object(y_batch, logits[6])
                    # gen = model.train_on_batch(, [y_batch, y_batch, y_batch])
                    # print(f'train_loss : {gen}')

                    grads = tape.gradient(true_loss, model.trainable_variables)
                    optimizer.apply_gradients(
                        (g, var) for g, var in zip(grads, model.trainable_variables)
                        if g is not None)

                    tr_loss = train_loss(true_loss)
                    tr_acc1 = train_accuracy(y_batch, logits[0])
                    tr_acc2 = train_accuracy(y_batch, logits[1])
                    tr_acc3 = train_accuracy(y_batch, logits[2])

                    tr_acc4 = train_accuracy(y_batch, logits[6])

                    sim_images = np.reshape(logits[3], (-1, 128, 128, 1))
                    logdir = f"../Log/similarity_matrix/{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
                    file_writer = tf.summary.create_file_writer(logdir)
                    with file_writer.as_default():
                        tf.summary.scalar("train_loss", tr_loss, step=epoch)
                        tf.summary.scalar("train_acc", tr_acc4, step=epoch)
                        tf.summary.image("Similarity Matrix",
                                         sim_images,
                                         step=epoch,
                                         max_outputs=12)
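                    # Hedged aside (not from the original source): creating a
                    # new summary writer on every step is expensive; hoisting
                    # tf.summary.create_file_writer(logdir) above the epoch
                    # loop writes all scalars/images into a single run instead.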

                    print(
                        f'[step : {step}/{len(x_train1)}] [epochs : {epoch}/{param.epochs}]'
                        f'train loss : {tr_loss}, domain 1-3_accuracy : {tr_acc1*100}, {tr_acc2*100}, {tr_acc3*100}'
                    )
                    print(
                        f'train merge acc : {tr_acc4*100} test loss : not implemented...'
                    )

            model_score = model.evaluate([te_data[0], te_data[1], te_data[2]],
                                         cat_te, verbose=0)
            break

        if repeat == 0:
            tracking = [
                dt(), param.method, param.model_name, param.nb_combine, repeat,
                model_score[0], model_score[1]
            ]
            ds.stock_result(tracking)
        else:
            tracking = [dt(), repeat, model_score[0], model_score[1]]
            ds.stock_result(tracking)

        ds.save_result_obo(param, tracking)

        model_result = None
        model_score = None
        tracking = None
        tr_data = None
        te_data = None
        K.clear_session()
        tf.keras.backend.clear_session()
        # sess.close()  # no tf.Session is created in this function, so there is nothing to close
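
# --- Hedged aside (not from the original source): the manual loop above can be
# condensed into a single train step. The names model, optimizer, loss_object
# and the assumption that the first three model outputs are per-modality
# logits mirror the snippet; everything else is illustrative.
@tf.function
def train_step(model, optimizer, loss_object, inputs, y_batch,
               loss_weights=(0.3, 0.3, 0.3)):
    with tf.GradientTape() as tape:
        outputs = model(inputs, training=True)
        losses = [loss_object(y_batch, logits) for logits in outputs[:3]]
        total_loss = tf.add_n([w * l for w, l in zip(loss_weights, losses)])
    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return total_loss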
Exemplo n.º 42
0
def full_lstmnet(i):
    print('Full Processing')

    data_1_train = data_1
    #    data_2_train = data_2

    train_data_reg_train = train_data_reg
    train_data_city_train = train_data_city
    train_data_cat1_train = train_data_cat1
    train_data_cat2_train = train_data_cat2
    train_data_prm1_train = train_data_prm1
    train_data_prm2_train = train_data_prm2
    #    train_data_prm3_train  = train_data_prm3
    #    train_data_sqnm_train  = train_data_sqnm
    train_data_usr_train = train_data_usr
    train_data_itype_train = train_data_itype
    train_user_features_train = train_user_features

    labels_train = labels

    pred_test = np.zeros(test_df.shape[0])

    for j in range(1, nbags + 1):
        print('bag ', j, ' Processing')

        model = gru_Bidirectional_selfEmbedding_model()
        #        model = gru_attention_model()

        #        callbacks = [
        #                EarlyStopping(monitor='val_loss', patience=patience, verbose=VERBOSEFLAG),
        #                ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_loss', save_best_only=True, verbose=VERBOSEFLAG),
        #                        ]

        #        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patience, min_lr=1e-9, epsilon = 0.00001, verbose=VERBOSEFLAG)
        fullepochs = 12
        learning_rate = 1e-3
        #sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        if optim_type == 'SGD':
            optim = SGD(lr=learning_rate,
                        decay=1e-6,
                        momentum=0.9,
                        nesterov=True)
        else:
            optim = Adam(lr=learning_rate)
        model.compile(optimizer=optim,
                      loss='mean_squared_error',
                      metrics=[root_mean_squared_error])

        model.fit(
            [
                data_1_train, train_data_reg_train, train_data_city_train,
                train_data_cat1_train, train_data_cat2_train,
                train_data_prm1_train, train_data_prm2_train,
                train_data_usr_train, train_data_itype_train,
                train_user_features_train
            ],
            labels_train,
            batch_size=batch_size,
            nb_epoch=fullepochs,
            #callbacks = callbacks, #[callbacks, reduce_lr],
            verbose=VERBOSEFLAG)

        pred_test += model.predict([
            test_data_1, test_data_reg, test_data_city, test_data_cat1,
            test_data_cat2, test_data_prm1, test_data_prm2, test_data_usr,
            test_data_itype, test_user_features
        ],
                                   batch_size=batch_size,
                                   verbose=VERBOSEFLAG)[:, 0]
        del model
        K.clear_session()
        gc.collect()
    pred_test /= nbags
    pred_test = pd.DataFrame(pred_test)
    pred_test.columns = ["deal_probability"]
    pred_test["item_id"] = test_df.item_id.values
    pred_test = pred_test[['item_id', 'deal_probability']]
    pred_test['deal_probability'] = pred_test['deal_probability'].clip(
        0.0, 1.0)
    sub_file = inDir + '/submissions/Prav.nn10.full' + '.csv'
    pred_test.to_csv(sub_file, index=False)
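
# --- Hedged aside (not from the original source): the bagging loop above is a
# common pattern -- train `nbags` models, average their predictions and free
# the session between bags. A stripped-down sketch with `build_model`,
# `train_inputs`, `labels` and `test_inputs` as placeholders:
def bagged_predict(build_model, train_inputs, labels, test_inputs,
                   nbags=5, epochs=12, batch_size=512):
    pred = None
    for _ in range(nbags):
        model = build_model()
        model.fit(train_inputs, labels, epochs=epochs,
                  batch_size=batch_size, verbose=0)
        bag_pred = model.predict(test_inputs, batch_size=batch_size)[:, 0]
        pred = bag_pred if pred is None else pred + bag_pred
        del model
        K.clear_session()
        gc.collect()
    return pred / nbags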
Exemplo n.º 43
0
def main():
    
    my_date_time = '_'.join(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S").split())

    parameters = {
                  'dataset':dataset,
                  'n_folds':n_folds,
                  'n_repeats':n_repeats,
                  'batch_size':batch_size,
                  'nb_epochs':nb_epochs,
                  'my_patience':my_patience,
                  'drop_rate':drop_rate,
                  'my_optimizer':my_optimizer,
                  'my_loss_function': my_loss_function,
                  'dense_units':dense_units,
                  'layer_numer':layer_numer,
                  }
    
    path_root = sys.path[0]

    name_save = path_root + '/results/' + dataset + '_augmentation_' +  my_date_time
      
    print ('========== loading samples ==========')
    
    samples = np.load("sample_all_Train10000_200.npy")
    entry_model = Entry()
    tf = samples[:,0,-1]
    altitude = (samples[:,1,0] - entry_model.constant['R0'])/1000
    downrange = samples[:,2,-1]*entry_model.constant['R0']/1000 - 100
    velocity = samples[:,3,0]
    gamma = samples[:,4,0]*180/np.pi
    
    print ('Tf range: max:%.2f,min:%.2f, [s],'%(np.amax(tf),np.amin(tf)))
    print ('Downrange range: max:%.2f,min:%.2f [km],'%(np.amax(downrange),np.amin(downrange)))
    print ('Initial Altitude range: max:%.2f,min:%.2f,[km]'%(np.amax(altitude),np.amin(altitude)))
    print ('Initial Velocity range:max:%d,min:%d, [m/s]'%(np.amax(velocity),np.amin(velocity)))
    print ('Initial Flight Path Angle range: max:%.2f,min:%.2f,'%(np.amax(gamma),np.amin(gamma)))

    alpha = samples[:,-1,:]

    ys = alpha.flatten() #output row style
    
    r = (samples[:,1,:].flatten() - entry_model.constant['R0'])/entry_model.constant['h0']
    theta = samples[:,2,:].flatten()
    v = samples[:,3,:].flatten()/ entry_model.constant['v0']
    g = samples[:,4,:].flatten()

    tensors = np.column_stack((r,theta,v,g))
    tensors = tensors.astype(np.float32)

    print ('input shape:', tensors.shape)

    print ('========== shuffling data ==========')

    shuffled_idxs = random.sample(range(tensors.shape[0]), int(tensors.shape[0]))  # sample without replacement
    tensors = tensors[shuffled_idxs]
    ys = ys[shuffled_idxs]
    shuffled_idxs = np.array(shuffled_idxs)
    
    print ('========== conducting', n_folds, 'fold cross validation ==========')
    print ('repeating each fold:', n_repeats, 'times')

    folds = np.array_split(tensors,n_folds,axis=0)

    print ('fold sizes:', [fold.shape[0] for fold in folds])

    folds_labels = np.array_split(ys,n_folds,axis=0)
    
    outputs = []
    histories = []

    for i in range(n_folds):
        
        t = time.time()
        
        x_train = np.concatenate([fold for j,fold in enumerate(folds) if j!=i],axis=0)
        x_test = [fold for j,fold in enumerate(folds) if j==i]
        
        y_train = np.concatenate([y for j,y in enumerate(folds_labels) if j!=i],axis=0)
        y_test = [y for j,y in enumerate(folds_labels) if j==i]
        
        for repeating in range(n_repeats):
            
            print ('clearing Keras session')
            K.clear_session()
            
            # instantiate model
            model = Sequential()
            
            model.add(Dense(dense_units,input_dim = 4))

            # use a separate name so the outer fold index `i` is not shadowed
            for _ in range(layer_numer):
                model.add(Dense(dense_units, activation=activation_hidden))
                model.add(Dropout(drop_rate))
            model.add(Dense(1))
            
                            
            # configure model for training
            model.compile(loss=my_loss_function,
                          optimizer=my_optimizer,
                          metrics=['mse', 'mae'])
            
            print ('model compiled')
            
           #early_stopping = EarlyStopping(monitor='val_acc', # go through epochs as long as acc on validation set increases
           #                               patience=my_patience,
           #                               mode='max') 
            
            history = model.fit(x_train,
                                y_train,
                                batch_size=batch_size,
                                nb_epoch=nb_epochs,
                                validation_data=(x_test, y_test),)
                               # callbacks=[early_stopping])

            # save [val loss, val MAE] at the epoch with the best (lowest) val MAE
            best_mae = min(history.history['val_mean_absolute_error'])
            best_idx = history.history['val_mean_absolute_error'].index(best_mae)
            output = [history.history['val_loss'][best_idx], best_mae]
            outputs.append(output)
            
            # also save full history for sanity checking
            histories.append(history.history)
            
        print ('**** fold', i+1 ,'done in ' + str(math.ceil(time.time() - t)) + ' second(s) ****')

    # save results to disk
    with open(name_save + '_parameters.json', 'w') as my_file:
        json.dump(parameters, my_file, sort_keys=True, indent=4)

    print ('========== parameters defined and saved to disk ==========')

    with open(name_save + '_results.json', 'w') as my_file:
        json.dump({'outputs':outputs,'histories':histories}, my_file, sort_keys=False, indent=4)

    print( '========== results saved to disk ==========')
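
# --- Hedged aside (not from the original source): the manual shuffle plus
# np.array_split folding in main() can also be expressed with scikit-learn's
# KFold, which yields per-fold index arrays directly:
def kfold_indices(n_samples, n_folds, seed=0):
    """Yield (train_idx, test_idx) pairs equivalent to the manual folding."""
    from sklearn.model_selection import KFold
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=seed)
    return kf.split(np.arange(n_samples))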
Exemplo n.º 44
0
def main(config):
    use_data_augmentation = True
    # This is the main configuration object
    training_params = {
        # Name of the split created
        'split': 'name_of_split',
        # Label: 'verb' or 'object'
        'label': 'verb',
        # Execute a quick run: 1 batch for training and 2 videos for test
        'toy_execution': False,
        # If the evaluation is already done and saved, whether to repeat it
        'redo_evaluation': False,
        # Oversample minority classes
        'oversampling': False,
        # Warm up: do 'warmup_epochs' epochs with learning rate 'warmup_lr'
        'use_warmup': False,
        'warmup_epochs': 3,
        'warmup_lr': 0.01,
        # From 0 to 1, percentage of offset at the beginning and end to sample
        'frame_sampling_offset': 0.,
        # Number of times to run the experiment (averaged at the end)
        'runs': 3,
        'epochs': 100,
        # Skip connection from last layer before ConvLSTM to output hidden
        # states of the ConvLSTM. It requires a 1x1 conv with a number of
        # channels equal to the number of hidden units of the ConvLSTM
        'skip_connect': False,
        # Number of timesteps
        'sequence_length': 25,
        'learning_rate': 0.0001,
        'batch_size': 16,
        # Number of layers to freeze, starting from 0
        # 142 to freeze everything except for the last conv block
        # 0 would set all layers as trainable
        # -1 to freeze all
        'last_layer_to_freeze_conv': 142,
        'optimizer': 'adam',
        # Maximum value to clip the gradient (to avoid large changes)
        'gradient_clipvalue': 0.,
        # Criterion to use for early stopping and also to choose the best model
        'stop_criterion': 'val_f1_metric',
        # Patience for early stopping
        'patience': 10,
        # Number of hidden states used in the ConvLSTM, i.e., number of 
        # output channels
        'num_convlstms': 1,
        'convlstm_hidden': 256,
        'convlstm_add_initial_state': False,
        'apply_conv_betweenconvlstm': False,
        'apply_conv_afterconvlstm': False,
        'last_layer_to_freeze': 0,
        'non_uniform_sampling': False,
        # Normalise input to the ConvLSTM with L2 Normalisation
        'convlstm_normalise_input': False,
        'dropout_rate': 0.,
        'convlstm_dropout_rate': 0.,
        'convlstm_recurrent_dropout_rate': 0.,
        'spatial_dropout_rate': 0.,
        'use_average_pool': True,
        'use_data_augmentation': use_data_augmentation,
        'random_horizontal_flipping': True,
        'random_corner_cropping': True,
        'random_lighting': False,
        # Apply class weighting in the loss function using the training set
        # class distribution as a prior
        'apply_class_weighting': True,
        # Regularisation L1/L2 in the loss. If both are used then L1_L2
        # regularisation is used (keras)
        'l1_reg_beta': 0.,
        'l2_reg_beta': 0.,
        # Add a 1x1 conv after the last conv block but before the non local
        # block in order to reduce the number of channels (ch)
        'add_1x1conv': True,
        'add_1x1conv_ch': 256,
        'min_frames': -1,
        # Activates debug mode: inputs to the network are saved in the folder
        # pointed out by 'debug_folder' below
        'debug_mode': False,
        'debug_folder': 'debug_folder',
        'visualisation_mode': False
    }

    # Name of the experiment (e.g. split_R_verb_detector)
    exp_name = 'split_{}_{}_detector'.format(
        training_params['split'], training_params['label']
    )

    root_path = config['split_path'] + training_params['split'] + '/'

    if training_params['label'] == 'verb':
        training_params['num_classes'] = (
             len(open(root_path + config['train_verbs_file'], 
                 'r').readlines())) 
        training_params['train_classes_file'] = (
            root_path + config['train_verbs_file']
        )
        training_params['val_classes_file'] = (
            root_path + config['val_verbs_file']
        )
        training_params['test_classes_file'] = (
            root_path + config['test_verbs_file']
        )

    elif training_params['label'] == 'object':
        training_params['num_classes'] = (
             len(open(root_path + config['train_objects_file'], 
                 'r').readlines())) 
        training_params['train_classes_file'] = (
            root_path + config['train_objects_file']
        )
        training_params['val_classes_file'] = (
            root_path + config['val_objects_file']
        )
        training_params['test_classes_file'] = (
            root_path + config['test_objects_file']
        )

    init_time = time.time()  

    # For reproducibility
    tf.set_random_seed(1)
    os.environ['PYTHONHASHSEED'] = '1'
    seed(1)
    rn.seed(1)
    
    # Path to folders to save plots and checkpoints
    checkpoints_folder = (config['project_folder'] +
        config['checkpoints_folder'] + 
        '{}/{}/'.format(training_params['split'], exp_name)
    )
    plots_folder = (config['project_folder'] + config['plots_folder'] +
        '{}/{}/'.format(training_params['split'], exp_name)
    )

    # Create any necessary folder
    if not os.path.exists(plots_folder): 
        os.makedirs(plots_folder)
    if not os.path.exists(checkpoints_folder): 
        os.makedirs(checkpoints_folder) 

    # Save training parameters
    with open(plots_folder + 'training_params.json', 'w') as fp:
        json.dump(training_params, fp, indent=4)
    
    # ===============================================================
    # LOAD THE DATA
    # ===============================================================

    # Compute number of videos from each set: train, validation and test
    train_file = root_path + config['train_file']
    val_file = root_path + config['val_file']
    test_file = root_path + config['test_file']
    nb_videos_train = num_sequences(config, training_params, 'train', 
                                    'training', train_file)
    nb_videos_val = num_sequences(config, training_params, 'val',
                                 'training', val_file)
    nb_videos_test = num_sequences(config, training_params, 'test',
                                   'training', test_file) 

    # Compute number of mini-batches for each set
    # Add an extra mini-batch in case that the number of samples
    # is not divisible by the mini-batch size
    nb_batches_train = nb_videos_train // training_params['batch_size']
    if nb_videos_train % training_params['batch_size'] > 0:
        nb_batches_train += 1
    nb_batches_val = nb_videos_val // training_params['batch_size']
    if nb_videos_val % training_params['batch_size'] > 0:
        nb_batches_val += 1
    nb_batches_test = nb_videos_test // training_params['batch_size']
    if nb_videos_test % training_params['batch_size'] > 0:
        nb_batches_test += 1  
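    # Hedged aside (not from the original source): each of the three blocks
    # above is just a ceiling division, e.g.
    #     nb_batches_train = -(-nb_videos_train // training_params['batch_size'])
    # The explicit remainder checks are kept above as the active logic.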

    # Necessary to load the model
    custom_objects = {'f1_metric': f1_metric}
    
    if training_params['use_data_augmentation']:
        print('train: using data augmentation')

    # Instantiate the generators of batches for training and validation
    train_generator = BatchGenerator(config, 'train', train_file,
                                     training_params, nb_batches_train)
    val_generator = BatchGenerator(config, 'val', val_file, 
                                   training_params, nb_batches_val)

    total_videos = float(nb_videos_train+nb_videos_val+nb_videos_test)
    
    if training_params['apply_class_weighting']:
        print('Class weighting applied')
    print('Number of videos to => train: {}, val: {}, test: {}'.format(
        nb_videos_train, nb_videos_val, nb_videos_test)
        )
    print('% of videos to => train: {}, val: {}, test: {}'.format(
        nb_videos_train/total_videos*100, nb_videos_val/total_videos*100,
        nb_videos_test/total_videos*100)
    )
    
    if not os.path.exists(plots_folder + 'results.json'):
        all_run_results = dict()
    
    # Vectors to accumulate the results of each run
    accuracy_by_input, accuracy_by_video = dict(), dict()
    f1_by_input, f1_by_video = dict(), dict()

    # ===============================================================
    # EXECUTE THE N RUNS OF THE EXPERIMENT
    # ===============================================================
    verbose = 1
    if training_params['runs'] > 1:
        verbose = 0

    classes_val, indices_val = get_classes_ordered(
        training_params['val_classes_file']
    )
    # Compute train labels to obtain class weights (for the loss function)
    labels_by_video, _ = load_labels(
        config, training_params, 'val', val_file,
        training_params['val_classes_file']
    )
    plot_class_distribution(plots_folder, labels_by_video, classes_val, 'val')
    del labels_by_video
    gc.collect()

    classes_test, indices_test = get_classes_ordered(
        training_params['test_classes_file']
    )
    labels_by_video, _ = load_labels(
        config, training_params, 'test', test_file,
        training_params['test_classes_file']
    )
    plot_class_distribution(plots_folder, labels_by_video, classes_test, 'test')
    del labels_by_video
    gc.collect()

    classes_train, indices_train = get_classes_ordered(
        training_params['train_classes_file']
    )
    labels_by_video, indices_by_video = load_labels(
        config, training_params, 'train', 
        train_file, training_params['train_classes_file']
    )
    plot_class_distribution(plots_folder, labels_by_video,
                            classes_train, 'train')

    if training_params['apply_class_weighting']:
        class_weights = compute_class_weight('balanced',
                                             np.unique(indices_by_video),
                                             indices_by_video)
        plot_weights_distribution(plots_folder, class_weights,
                                 classes_train, 'train')
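        # Hedged aside (not from the original source): compute_class_weight
        # returns an array ordered by class index, while Keras' class_weight
        # argument expects a {class_index: weight} dict, so a conversion such
        # as class_weights = dict(enumerate(class_weights)) may be needed
        # before the fit_generator calls below.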
    
    histories = []
    for run in range(training_params['runs']):
        print('EXECUTING RUN {} ----------------------------------'.format(run))
        run_folder = plots_folder + 'run_{}/'.format(run)
        if not os.path.exists(run_folder):
            os.makedirs(run_folder)
        
        save_best_weights_file = (checkpoints_folder + 
            'best_weights_{}.h5'.format(run) 
        ) 
 
        if os.path.exists(plots_folder + 'results.json'):
            with open(plots_folder + 'results.json', 'r') as json_file:
                all_run_results = json.load(json_file)
        if not 'run_{}'.format(run) in all_run_results:
            all_run_results['run_{}'.format(run)] = dict()

        training_skipped = False
        # If this run has already been computed, skip training
        if not 'training_result' in all_run_results['run_{}'.format(run)]:
            model = deploy_network(config, training_params)
            
            if training_params['epochs'] > 0:
                clipvalue = None
                if training_params['gradient_clipvalue'] > 0.:
                    clipvalue = training_params['gradient_clipvalue']
                    print('Clipping gradient to {}'.format(
                        training_params['gradient_clipvalue']
                    ))

                if training_params['optimizer'] == 'adam':
                    print('Using Adam optimizer')
                    optimizer = Adam(lr=training_params['learning_rate'],
                            beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                            clipvalue=clipvalue)
                elif training_params['optimizer'] == 'sgd':
                    print('Using SGD optimizer')
                    optimizer = SGD(lr=training_params['learning_rate'],
                                    momentum=0.9, decay=1e-6, nesterov=True, 
                                    clipvalue=clipvalue)
                elif training_params['optimizer'] == 'rmsprop':
                    print('Using RMSprop optimizer')
                    optimizer = RMSprop(lr=training_params['learning_rate'])
                
                metric_list = config['metrics'][1:] + [f1_metric]
                model.compile(optimizer=optimizer,
                              loss='categorical_crossentropy',
                              metrics=metric_list)
                model.summary()

                print('Exp {}, run {}'.format(exp_name, run))

                apply_cw = training_params['apply_class_weighting'] 
                # Optional warmup training
                if training_params['use_warmup']:
                    warmup_epochs = training_params['warmup_epochs']
                    history = model.fit_generator(generator=train_generator,
                                            validation_data=val_generator,
                                            epochs=warmup_epochs,
                                            max_queue_size=10,
                                            workers=2, 
                                            verbose=1,
                                            class_weight=(class_weights
                                                if apply_cw
                                                else None),
                                            shuffle=False,
                                            use_multiprocessing=False)

                # Type of criterion for the ModelCheckpoint and EarlyStopping
                # depending on the metric used for the EarlyStopping
                if training_params['stop_criterion'] == 'val_loss':
                    mode = 'min'
                else:
                    mode = 'max'
                c = ModelCheckpoint(str(save_best_weights_file),
                                    monitor=training_params['stop_criterion'], 
                                    save_best_only=True, 
                                    save_weights_only=False, 
                                    mode=mode,
                                    period=1)
                e = EarlyStopping(monitor=training_params['stop_criterion'],
                                min_delta=0,
                                patience=training_params['patience'],
                                verbose=0,
                                mode=mode,
                                baseline=None)
                callbacks = [c, e]

                train_time = time.time()
                steps = None
                if training_params['toy_execution']:
                    steps = 1
                history = model.fit_generator(generator=train_generator,
                                            steps_per_epoch=steps,
                                            validation_data=val_generator,
                                            validation_steps=steps,
                                            epochs=training_params['epochs'],
                                            max_queue_size=10,
                                            workers=2, 
                                            verbose=1,
                                            class_weight=(class_weights
                                                if apply_cw
                                                else None),
                                            shuffle=False,
                                            use_multiprocessing=False,
                                            callbacks=callbacks)
            
                # Save the history of training 
                histories.append(history.history)
                print('TRAINING PHASE ENDED')
                metric = training_params['stop_criterion']
                # Depending on the metric to stop the training, choose whether
                # the minimum or maximum value must be chosen
                if metric == 'val_loss':
                    func = np.argmin
                else:
                    func = np.argmax
                best_epoch = func(history.history[metric])

                # Save results to the dictionary
                k1, k2 = 'run_{}'.format(run), 'training_result'
                all_run_results[k1][k2] = dict()
                all_run_results[k1][k2]['best_epoch'] = best_epoch
                all_run_results[k1][k2][
                    'best_epoch_val_loss'] = history.history[
                                                'val_loss'][best_epoch]
                all_run_results[k1][k2][
                    'best_epoch_val_acc'] = history.history[
                                                'val_acc'][best_epoch]
                all_run_results[k1][k2][
                    'best_epoch_val_f1'] = history.history[
                                                'val_f1_metric'][best_epoch]
                # Save intermediate result
                with open(plots_folder + 'results.json', 'w') as f:
                    json.dump(all_run_results, f, indent=4)
                print('Time to train for {} epochs: {}s'.format(
                    training_params['epochs'], time.time()-train_time))
        else:
            training_skipped = True
                                
        # TEST ========================
        print('='*20)
        print('TEST PHASE')
        print('training_skipped', training_skipped)
        # If training was not skipped, save the histories (loss, accuracy and
        # f1 per epoch, per run)
        if not training_skipped:
            save_history(run_folder, history.history)
            del model
        # If training was skipped, load the history of this run
        else:
            histories.append(load_history(run_folder, run))

        # Load the best model for evaluation
        model = load_model(str(save_best_weights_file),
                           custom_objects=custom_objects)
        print('Loaded the checkpoint at {}'.format(save_best_weights_file))
    
        class_list = classes_train
        print('Exp {}, run {}'.format(exp_name, run))
        res_dict = dict()
        for mode in ['train', 'val', 'test']:
            # If the evaluation was already saved and 'redo_evaluation' is not
            # set to True
            if (
                ('evaluation_{}'.format(mode) in 
                    all_run_results['run_{}'.format(run)]) and
                not training_params['redo_evaluation']
            ):
                if mode not in accuracy_by_video:
                    accuracy_by_video[mode] = []
                if mode not in f1_by_video:
                    f1_by_video[mode] = []
                k1, k2 = 'run_{}'.format(run), 'evaluation_{}'.format(mode)
                _f1_by_video = all_run_results[k1][k2]['f1']
                _accuracy_by_video = all_run_results[k1][k2]['accuracy']
                accuracy_by_video[mode].append(_accuracy_by_video/100.)
                #f1_by_input[mode].append(_f1_by_input)
                f1_by_video[mode].append(_f1_by_video/100.)
                print('{}: Accuracy per video: {}, '.format(
                        mode, _accuracy_by_video) +
                      'Macro-F1 per video: {}'.format( _f1_by_video)
                )
                continue

            if training_params['use_data_augmentation']:
                print('{}: using data augmentation'.format(mode))
            
            #if not results_loaded:
            if mode == 'train':
                if training_params['oversampling']:
                    nb_videos_train = num_sequences(config, training_params, 
                                                    'train', 'evaluation',
                                                    train_file)
                nb_videos = nb_videos_train
                generator = load_gaze_plus_sequences(config, 'train',
                                                     train_file,
                                                     training_params)
                classes_train, indices_train = get_classes_ordered(
                    training_params['train_classes_file']
                )
            elif mode == 'val':
                nb_videos = nb_videos_val
                generator = load_gaze_plus_sequences(config, 'val',
                                                     val_file,
                                                     training_params) 
                classes_train, indices_train = get_classes_ordered(
                    training_params['train_classes_file']
                )
            elif mode == 'test':
                nb_videos = nb_videos_test
                generator = load_gaze_plus_sequences(config, 'test',
                                                     test_file,
                                                     training_params) 
                classes_test, indices_test = get_classes_ordered(
                    training_params['train_classes_file']
                )
                            
            if training_params['toy_execution']:
                nb_videos = 2

            predictions_by_video, ground_truth_by_video = [], []
            length_of_videos = dict()
            predictions_by_class = []
            for _ in range(training_params['num_classes']):
                predictions_by_class.append([])
            info = dict()           
            
            # Process video by video
            print('Processing {}, {} videos'.format(mode, nb_videos))
            for i in tqdm(range(nb_videos)):
                batch_x, batch_y, video_name, length = generator.next()
                predictions = model.predict(batch_x)[0]

                # Dictionary to save results by length of video
                if not length in length_of_videos:
                    length_of_videos[length] = []

                # Save class predicted by the model and ground truth
                predicted = np.argmax(predictions,0)
                predictions_by_video.append(predicted)
                ground_truth_by_video.append(batch_y)
            
                # Save prediction by class
                predictions_by_class[batch_y].append(predicted)

                # Save results by video
                info[video_name] = dict()
                info[video_name]['ground_truth_index'] = batch_y
                info[video_name]['ground_truth_class'] = class_list[batch_y]
                info[video_name]['prediction_index'] = predicted
                info[video_name]['prediction_softmax'] = predictions[0]
                info[video_name]['prediction_class'] = class_list[predicted]
                info[video_name]['length'] = length
                info[video_name]['classes'] = class_list
            
            ground_truth_by_video = np.squeeze(np.stack(ground_truth_by_video))
            predictions_by_video = np.squeeze(np.stack(predictions_by_video))

            cm_by_video = confusion_matrix(ground_truth_by_video,
                                        predictions_by_video,
                                        labels=range(
                                            training_params['num_classes']
                                        ))
            _accuracy_by_video = accuracy_score(ground_truth_by_video,
                                                predictions_by_video)
            _f1_by_video = f1_score(ground_truth_by_video, predictions_by_video,
                                    average='macro')

            k1, k2 = 'run_{}'.format(run), 'evaluation_{}'.format(mode)
            all_run_results[k1][k2] = dict()
            all_run_results[k1][k2]['num_videos'] = nb_videos
            all_run_results[k1][k2]['accuracy'] = _accuracy_by_video*100.
            all_run_results[k1][k2]['f1'] = _f1_by_video*100.

            print('{}: Accuracy per video: {}, Macro-F1 per video: {}'.format(
                mode, _accuracy_by_video, _f1_by_video)
            )

            plot_confusion_matrix(
                cm_by_video, class_list,
                run_folder + '_normalized_by_video_{}_{}_{}.pdf'.format(
                    mode, exp_name, run),
                normalize=True,
                title='Normalized confusion matrix for {} set'.format(mode),
                cmap='coolwarm'
            )
            plot_confusion_matrix(
                cm_by_video, class_list,
                run_folder + '_by_video_{}_{}_{}.pdf'.format(
                    mode, exp_name, run),
                normalize=False, 
                title='Confusion matrix for {} set'.format(mode),
                cmap='coolwarm'
            )
          
            # Compute and save results by class                
            for i in range(len(predictions_by_class)):
                if len(predictions_by_class[i]) > 0:
                    pred = predictions_by_class[i]
                    acc = accuracy_score([i]*len(pred),pred)
                    f1 = f1_score([i]*len(pred),pred, average='macro')
                    predictions_by_class[i] = [acc, f1,
                                            len(predictions_by_class[i])]    
                else:
                    predictions_by_class[i] = [0., 0.,
                                            len(predictions_by_class[i])]
            save_results(run_folder, mode, predictions_by_class,
                         class_list, run)

            # Save general info
            save_in_csv(run_folder + '{}_run{}_evaluation_info.csv'.format(
                mode,run), info)
        
            if not mode in accuracy_by_video:
                accuracy_by_video[mode] = []
                f1_by_video[mode] = []

            accuracy_by_video[mode].append(_accuracy_by_video)
            f1_by_video[mode].append(_f1_by_video)

            del generator
            gc.collect()

            with open(plots_folder + 'results.json', 'w') as f:
                json.dump(all_run_results, f, indent=4)
        
        # END OF THE EVALUATION ===========================================
 
        del model
        gc.collect()
        K.clear_session()
        tf.set_random_seed(1)

    # END OF ALL THE RUNS ===========================================
    if not training_skipped: 
        del val_generator, train_generator
        gc.collect()
    
    plot_training_info(plots_folder,
                       exp_name, 
                       config['metrics'] + ['f1_metric'],
                       True,
                       histories)
    # ===============================================================
    # SHOW THE AVERAGED RESULTS
    # ===============================================================

    results_dict = dict()
    print('='*20)
    results_file = open(plots_folder + 'results.txt', 'w')
    for mode in ['train', 'val', 'test']:
        res_msg = '='*20 + '\n'
        res_msg += '{}: AVERAGE RESULTS OF {} RUNS\n'.format(
            mode, training_params['runs'])
        res_msg += '='*20 + '\n'

        results_dict[mode] = dict()
        results_dict[mode]['accuracy_by_video'] = accuracy_by_video[mode]
        results_dict[mode]['f1_by_video'] = f1_by_video[mode]

        res_msg += 'ACCURACY: {:.2f} (-+{:.2f}), MACRO F1: {:.2f} (-+{:.2f})\n'.format(
            np.mean(accuracy_by_video[mode])*100, np.std(accuracy_by_video[mode])*100,
            np.mean(f1_by_video[mode])*100, np.std(f1_by_video[mode])*100
            )

        res_msg += 'RESULTS PER RUN\n'
        res_msg += '----------------\n'
        res_msg += '\nAccuracy by video:\n'
        res_msg += ', '.join(str(x) for x in accuracy_by_video[mode])
        res_msg += '\nMacro F1 by video:\n'
        res_msg += ', '.join(str(x) for x in f1_by_video[mode])
        res_msg += '\n'
        print(res_msg)
        results_file.write(res_msg)
    results_file.close()

    res_msg = '\n\nTime for training and evaluation of every run: {}s'.format(time.time()-init_time)
    print(res_msg)
    
    final_results = dict()
    for run in range(training_params['runs']):
        k1 = 'run_{}'.format(run)
        final_results[k1] = dict()
        for mode in ['train', 'val', 'test']:
            final_results[k1][mode] = dict()
            final_results[k1][mode]['accuracies'] = [x*100 for x in accuracy_by_video[mode]]
            final_results[k1][mode]['f1s'] = [x*100 for x in f1_by_video[mode]]
    
    with open(plots_folder + 'overall_results.json', 'w') as f:
        json.dump(final_results, f, indent=4)
Exemplo n.º 45
0
    def load_model(self, name):
        backend.clear_session()
        self.model = load_model(name)
        return
Exemplo n.º 46
0
    def predict(self):
        '''
        Everyday prediction
        '''
        if not os.path.isdir(self.folder):
            os.makedirs(self.folder)

        data_predict = {}
        data_predict['test_id'] = []
        data_predict['PM2.5'] = []
        data_predict['PM10'] = []
        data_predict['O3'] = []

        tz = pytz.timezone('utc')
        now = datetime.datetime.now(tz)
        now = datetime.datetime(now.year, now.month, now.day, now.hour)

        data_folder = request_data(self.request_paras,
                                   now,
                                   use_caiyun=self.use_caiyun)

        for station_id in self.station_infos:
            print('*' * 5 + station_id + '*' * 5)
            station_info = self.station_infos[station_id]
            for i in range(48):
                data_predict['test_id'].append('{}#{}'.format(station_id, i))
            for p in station_info['pollutions']:
                print('-' * 5 + p + '-' * 5)
                x = prepare_data(data_folder,
                                 station_id,
                                 station_info['model_id'][p],
                                 station_info['norm'][p],
                                 p,
                                 use_caiyun=self.use_caiyun)
                #return x
                #print(datetime.datetime.now())
                model = keras.models.load_model(station_info['model_file'][p],
                                                custom_objects={
                                                    'loss_smape_rmse':
                                                    my_loss.loss_smape_rmse
                                                })
                #print(datetime.datetime.now())
                predicted = model.predict(x)
                #return predicted
                norm_y = utils.normalization()
                norm_y.load(station_info['norm'][p]['norm_y'])
                predicted = norm_y(predicted, forward=False)
                for y in predicted[0]:
                    data_predict[p].append(y)

                K.clear_session()

        length = max(len(data_predict['O3']), len(data_predict['PM2.5']),
                     len(data_predict['PM10']))
        # pad O3 with zeros when some stations did not predict it, so that all
        # columns have the same length for the DataFrame below
        if len(data_predict['O3']) < length:
            for i in range(length - len(data_predict['O3'])):
                data_predict['O3'].append(0)

        predict_file = os.path.join(
            self.folder, self.prefix_predicted + now.strftime("%Y-%m-%d-%H") +
            self.postfix_predicted + '.csv')
        pd_predict = pd.DataFrame.from_dict(data_predict)
        pd_predict.to_csv(predict_file,
                          columns=['test_id', 'PM2.5', 'PM10', 'O3'],
                          index=False)

        if self.auto_submit:
            utils.submit(predict_file, 'submit_' + now.strftime("%Y-%m-%d-%H"),
                         'auto_submit')

        return predict_file
Exemplo n.º 47
0
import matplotlib.pyplot as plt

from cv2 import imread, imwrite
# from imageio import imread, imwrite


from LAST_BUILD.gan_settings import _IMG_ROWS, _IMG_COLS, _CHANNEL, \
    _TRAIN_IMG_PATH, _TRUE_PHOTOS_DIR, _BLENDER_PHOTOS_DIR, \
    _ROTATION, _LIGHTNING, \
    _TRAIN_STEPS,  _BATCH_SIZE, _SAVE_INTERVAL, \
    _OUTPUT_IMAGES_X, _OUTPUT_IMAGES_Y, \
    _MOBILENET_INPUT_SHAPE, \
    _GENERATED_FACES_PATH, _INPUT_TENSOR_SHAPE, _PERSONS

clear_session()
"""
########################################################################################################################
########################################################################################################################
########################################################################################################################
"""

__COLS = 4016
__ROWS = 6016


def _get_labeled_true_photos(directory):
    for path, _, file_names in os.walk(directory):
        for file_name in file_names:
            for rotation_label in _get_rotation_labels():
                for light_label in _get_lightning_labels():
Exemplo n.º 48
0
def main():
    train = pd.read_csv(args.folds_csv)
    MODEL_PATH = os.path.join(args.models_dir, args.network + args.alias)
    folds = [int(f) for f in args.fold.split(',')]

    print('Training Model:', args.network + args.alias)

    for fold in folds:

        K.clear_session()
        print(
            '***************************** FOLD {} *****************************'
            .format(fold))

        if fold == 0:
            if os.path.isdir(MODEL_PATH):
                raise ValueError('Such Model already exists')
            os.system("mkdir {}".format(MODEL_PATH))

        # Train/Validation sampling
        df_train = train[train.fold != fold].copy().reset_index(drop=True)
        df_valid = train[train.fold == fold].copy().reset_index(drop=True)

        # Train on pseudolabels only
        if args.pseudolabels_dir != '':
            pseudolabels = pd.read_csv(args.pseudolabels_csv)
            df_train = pseudolabels.sample(
                frac=1, random_state=13).reset_index(drop=True)

        # Keep only non-black images
        ids_train, ids_valid = df_train[
            df_train.unique_pixels > 1].id.values, df_valid[
                df_valid.unique_pixels > 1].id.values

        print('Training on {} samples'.format(ids_train.shape[0]))
        print('Validating on {} samples'.format(ids_valid.shape[0]))

        # Initialize model
        weights_path = os.path.join(MODEL_PATH,
                                    'fold_{fold}.hdf5'.format(fold=fold))

        # Get the model
        model, preprocess = get_model(args.network,
                                      input_shape=(args.input_size,
                                                   args.input_size, 3),
                                      freeze_encoder=args.freeze_encoder)

        # LB metric threshold
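        # (Presumably the Lovász hinge loss is computed on raw logits, so thresholding
        # at 0 corresponds to the sign of the logit, whereas probability-based losses
        # are thresholded at 0.5; this reasoning is an assumption, not stated in the code.)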
        def lb_metric(y_true, y_pred):
            return Kaggle_IoU_Precision(
                y_true,
                y_pred,
                threshold=0 if args.loss_function == 'lovasz' else 0.5)

        model.compile(optimizer=RMSprop(lr=args.learning_rate),
                      loss=make_loss(args.loss_function),
                      metrics=[lb_metric])

        if args.pretrain_weights is None:
            print('No weights passed, training from scratch')
        else:
            wp = args.pretrain_weights.format(fold)
            print('Loading weights from {}'.format(wp))
            model.load_weights(wp, by_name=True)

        # Get augmentations
        augs = get_augmentations(args.augmentation_name,
                                 p=args.augmentation_prob)

        # Data generator
        dg = SegmentationDataGenerator(input_shape=(args.input_size,
                                                    args.input_size),
                                       batch_size=args.batch_size,
                                       augs=augs,
                                       preprocess=preprocess)

        train_generator = dg.train_batch_generator(ids_train)
        validation_generator = dg.evaluation_batch_generator(ids_valid)

        # Get callbacks
        callbacks = get_callback(args.callback,
                                 weights_path=weights_path,
                                 fold=fold)

        # Fit the model with Generators:
        model.fit_generator(
            generator=ThreadsafeIter(train_generator),
            steps_per_epoch=ids_train.shape[0] // args.batch_size * 2,
            epochs=args.epochs,
            callbacks=callbacks,
            validation_data=ThreadsafeIter(validation_generator),
            validation_steps=np.ceil(ids_valid.shape[0] / args.batch_size),
            workers=args.num_workers)

        gc.collect()
Exemplo n.º 49
0
    def run_cross_validation(self, output_dir=None):
        """
        The main function that calls most of the other interface functions.
        """
        # create the output directory for storing stuff
        if output_dir is not None:
            os.makedirs(output_dir, exist_ok=False)
        else:
            output_dir = "./cv_output"

        # be sure that the context has its data ready
        self.cv_context.data_handler.read_data_if_necessary()

        # allow the cv context to write additional information
        # about the data
        self.cv_context.write_data_info(output_dir)

        # make training/test split
        train_ids, test_ids = self.make_train_test_split()

        if test_ids is not None:
            # check if training and testset are similar wrt survival information
            assert not set(train_ids).intersection(set(test_ids))
            check_cohort_differences(
                train_ids, test_ids, self.cv_context.data_handler.outcome_dict)

        else:
            print("No separate testset available!")

        print("\nStarting {}-fold cross-validation with {} repetitions.".format(
            self.folds, self.reps))

        # for each of the training repetitions create stratified cross-validation splits
        # of the training data (a standalone sketch of this idea follows the example)
        self.create_cv_splits(train_ids)
        # store the cv splits and create folders for each rep and fold
        self.store_cv_splits(output_dir)

        perf_dfs = []  # dataframes of performance indices for each fold
        pred_dfs = dict()
        for r in range(self.reps):
            rep = "rep_" + str(r)
            rep_dir = os.path.join(output_dir, rep)
            # os.makedirs(rep_dir, exist_ok=False)

            for k in range(self.folds):
                fold = "fold_" + str(k)
                fold_dir = os.path.join(rep_dir, fold)
                # os.makedirs(fold_dir, exist_ok=False)

                ids_train, ids_valid = self.cv_splits[r][k]

                pred_dict, perf_df = self.cv_context.run_single_fold(
                    ids_train, ids_valid, test_ids,
                    output_dir=fold_dir, rep=r, fold=k,
                    print_model_summary=((r==0) and (k==0)))

                perf_dfs.append(perf_df)
                # update the pred_dfs dictionary
                # with the dictionary pred_dict that contains
                # the subevaluation results for the current fold
                for res_descr in pred_dict:
                    if res_descr not in pred_dfs:
                        pred_dfs[res_descr] = {}

                    if r not in pred_dfs[res_descr]:
                        pred_dfs[res_descr][r] = {}

                    pred_dfs[res_descr][r][k] = pred_dict[res_descr]

                # release GPU memory after every training
                K.clear_session()

        # all model performances for different folds and reps
        full_perf_df = pd.concat(perf_dfs, ignore_index=True, sort=False)
        # store the evaluation perf_df
        full_perf_df.to_csv(
            os.path.join(output_dir, "model_performances.csv"),
            na_rep="NAN", index=False)

        # store the test set
        pd.DataFrame(test_ids).to_csv(
            os.path.join(output_dir, "ids_test.csv"), index=False,
            header=False)

        return pred_dfs, perf_dfs
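
The comments above describe creating stratified cross-validation splits for every training repetition and releasing GPU memory with K.clear_session() after each fold. A minimal standalone sketch of such a split structure (an assumption using scikit-learn's StratifiedKFold with hypothetical ids/labels arrays, not the project's own create_cv_splits) could look like:

import numpy as np
from sklearn.model_selection import StratifiedKFold

def make_cv_splits(ids, labels, folds=5, reps=3, seed=0):
    """Return cv_splits[r][k] == (ids_train, ids_valid), mirroring the access pattern above."""
    ids, labels = np.asarray(ids), np.asarray(labels)
    cv_splits = []
    for r in range(reps):
        # a fresh, differently seeded stratified split for every repetition
        skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed + r)
        cv_splits.append([(ids[tr], ids[va]) for tr, va in skf.split(ids, labels)])
    return cv_splits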
Exemplo n.º 50
0
# convert the grayscale images to RGB and resize them to 100x100
train_224 = np.zeros([2048, 100, 100, 3], dtype=np.float32)

for i, s in enumerate(x_train):

    converted = cv2.cvtColor(s, cv2.COLOR_GRAY2RGB)
    # convert to a 3-channel colour image (to emphasise colour features)
    resized = cv2.resize(converted, (100, 100), interpolation=cv2.INTER_CUBIC)
    # cv2.resize(source image, output size, interpolation): cv2.INTER_CUBIC / cv2.INTER_LINEAR
    # are used when enlarging an image, cv2.INTER_AREA when shrinking (see the sketch after this loop)
    # Interpolation means building an approximating function f(x) from statistically or
    # experimentally obtained data points (xi) and using it to evaluate the function at new inputs.
    del converted  # release the intermediate variable
    train_224[i] = resized
    del resized
    bek.clear_session()
    gc.collect()

    # plt.imshow(train_224[i])
    # plt.show()
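
# A short, self-contained sketch (an assumption about typical cv2.resize usage, not part
# of the original pipeline) contrasting the interpolation modes mentioned above:
import cv2
import numpy as np

_demo_img = np.random.randint(0, 256, (300, 300, 3), dtype=np.uint8)
_shrunk = cv2.resize(_demo_img, (100, 100), interpolation=cv2.INTER_AREA)     # preferred when shrinking
_enlarged = cv2.resize(_demo_img, (600, 600), interpolation=cv2.INTER_CUBIC)  # preferred when enlarging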

from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
# With ReduceLROnPlateau the learning rate is changed when the previous epoch ends (based on the
# monitored metric); with LearningRateScheduler it is changed when the current epoch starts.
# Worth trying both; a short sketch follows below.
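# A minimal sketch (an assumption using the standard Keras callbacks API; the names and
# the schedule below are illustrative only) showing how the two callbacks are constructed:
reduce_lr_example = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6)

def _step_decay(epoch):
    # halve the initial rate every 10 epochs, applied at the start of each epoch
    return 1e-3 * (0.5 ** (epoch // 10))

lr_scheduler_example = LearningRateScheduler(_step_decay, verbose=1)
# Either (or both) would then be passed to model.fit via callbacks=[...].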
from keras.preprocessing.image import ImageDataGenerator
from sklearn import metrics

datagen = ImageDataGenerator(width_shift_range=(-1, 1),
                             height_shift_range=(-1, 1),
                             zoom_range=0.15,
                             validation_split=0.2)
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.average_precision_evaluator import Evaluator

#%matplotlib inline

# Set a few configuration parameters.
img_height = 300
img_width = 300
n_classes = 20


# 1: Build the Keras model
model_mode = 'inference'
model_mode = 'training'

K.clear_session() # Clear previous models from memory.

model = ssd_300(image_size=(img_height, img_width, 3),
                n_classes=n_classes,
                mode=model_mode,
                l2_regularization=0.0005,
                scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
                aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5]],
                two_boxes_for_ar1=True,
                steps=[8, 16, 32, 64, 100, 300],
                offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
    def clear_model(self):
        self.model = None
        K.clear_session()
        gc.collect()
            validation_data=([val_his, val_mask, val_target,
                              val_feats], np.eye(output_dim)[val_y]),
            verbose=1,
            callbacks=[early_stopping])
    oof_df.loc[val_idx, 'nn_prob'] = clf.predict(
        [val_his, val_mask, val_target, val_feats], batch_size=2048)[:, 1]
    print('val auc:', roc_auc_score(val_y, oof_df['nn_prob'].values[val_idx]))
    test_pred_df['nn_prob'] += clf.predict(
        [test_his, test_mask, test_target, test_feats],
        batch_size=2048)[:, 1] / skf.n_splits
    emb_layer = keras.models.Model(inputs=clf.input,
                                   outputs=clf.get_layer(name='emb').output)
    oof_emb[val_idx] = emb_layer.predict(
        [val_his, val_mask, val_target, val_feats], batch_size=2048)
    test_emb += emb_layer.predict(
        [test_his, test_mask, test_target, test_feats],
        batch_size=2048) / skf.n_splits

    del emb_layer, clf
    BKD.clear_session()
    tf.reset_default_graph()
    gc.collect()

    print('runtime: {}\n'.format(time.time() - t))

for i in tqdm(range(emb_size)):
    oof_df['nn_emb_{}'.format(i)] = oof_emb[:, i]
    test_pred_df['nn_emb_{}'.format(i)] = test_emb[:, i]
oof_df.to_pickle('../user_data/data/nn/nn_trn.pkl')
test_pred_df.to_pickle('../user_data/data/nn/nn_test.pkl')
Exemplo n.º 54
0
def predict_liver_and_tumor(model, image, batch, input_size, input_cols,
                            thresh_liver=0.5, thresh_tumor=0.9, num=3,
                            latent_model=None):
    '''
    Prediction of segmentation of both liver and tumor
    '''
    print('Prediction segmentations...')
    window_cols = (input_cols/4)
    count = 0
    box_test = np.zeros((batch, input_size, input_size, input_cols, 1),
                        dtype="float32")

    x = image.shape[0]
    y = image.shape[1]
    z = image.shape[2]
    right_cols = int(min(z, y+10)-input_cols)
    left_cols = max(0, min(0-5, right_cols))
    score = np.zeros((x, y, z, num), dtype='float32')
    score_num = np.zeros((x, y, z, num), dtype='int16')
    print(x, y, z)

    if latent_model:
        # Initialize 3D latent space features array
        n_filters = 504
        # It is not documented where 504 comes from: the network starts with
        # nb_filters = 96 and, through the compression and growth rate of a
        # number of dense blocks, ends up with 504 feature maps.
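        # In a DenseNet-style encoder, a dense block of L layers with growth rate g maps
        # c input channels to c + L*g, and a transition layer with compression theta keeps
        # floor(theta * (c + L*g)); starting from 96 channels a few such blocks can
        # plausibly reach 504, though the exact block configuration is not shown here.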
        x_ls = x / 2**5  # Number of dense blocks + 1
        y_ls = y / 2**5
        latent_features = np.zeros((x_ls, y_ls, z, n_filters), dtype='float32')
        latent_features_num = np.zeros((x_ls, y_ls, z, n_filters), dtype='int16')

    iterator = xrange(left_cols, right_cols+window_cols, window_cols)
    cnum = 0
    for cols in iterator:
        cnum += 1
        # print ('and', z-input_cols,z)
        print(('\t Running column {} / {}.').format(cnum, len(iterator)))
        if cols > z - input_cols:
            patch_test = image[0:input_size, 0:input_size, z-input_cols:z]
            box_test[count, :, :, :, 0] = patch_test
            # print ('final', input_cols-window_cols, input_cols)
            patch_test_mask = model.predict(box_test, batch_size=batch,
                                            verbose=0)
            patch_test_mask = K.softmax(patch_test_mask)
            patch_test_mask = K.eval(patch_test_mask)
            patch_test_mask = patch_test_mask[:, :, :, 1:-1, :]

            for i in xrange(batch):
                score[0:input_size, 0:input_size, z-input_cols+1:z-1, :] += patch_test_mask[i]
                score_num[0:input_size, 0:input_size,  z-input_cols+1:z-1, :] += 1

            if latent_model:
                latent_features_test = latent_model.predict(box_test,
                                                            batch_size=batch,
                                                            verbose=0)

                for i in xrange(batch):
                    # FIXME: adopted size to default settings, should be adaptive
                    latent_features[:, :, z-3:z-1, :] += latent_features_test[i]
                    latent_features_num[:, :, z-3:z-1, :] += 1
        else:
            patch_test = image[0:input_size, 0:input_size, cols:cols + input_cols]
            box_test[count, :, :, :, 0] = patch_test
            patch_test_mask = model.predict(box_test, batch_size=batch,
                                            verbose=0)
            patch_test_mask = K.softmax(patch_test_mask)
            patch_test_mask = K.eval(patch_test_mask)
            patch_test_mask = patch_test_mask[:, :, :, 1:-1, :]
            for i in xrange(batch):
                score[0:input_size, 0:input_size, cols+1:cols+input_cols-1, :] += patch_test_mask[i]
                score_num[0:input_size, 0:input_size, cols+1:cols+input_cols-1, :] += 1

            if latent_model:
                latent_features_test = latent_model.predict(box_test,
                                                            batch_size=batch,
                                                            verbose=0)

                for i in xrange(batch):
                    # FIXME: adopted size to default settings, should be adaptive
                    latent_features[:, :, cols+1:cols+3, :] += latent_features_test[i]
                    latent_features_num[:, :, cols+1:cols+3, :] += 1

    # average the accumulated softmax scores over all overlapping windows; the 1e-4
    # guards against division by zero where no window contributed
    score = score/(score_num + 1e-4)
    if latent_model:
        latent_features = latent_features/(latent_features_num + 1e-4)

    # score[:, :, 0] == background
    result1 = score[:, :, :, num-2]  # Liver
    result2 = score[:, :, :, num-1]  # Lesions

    K.clear_session()

    print('\t Applying postprocessing.')
    result1[result1 >= thresh_liver] = 1
    result1[result1 < thresh_liver] = 0
    result2[result2 >= thresh_tumor] = 1
    result2[result2 < thresh_tumor] = 0
    result1[result2 == 1] = 1

    #  preserve the largest liver
    lesions = result2
    box = list()
    [liver_res, num] = measure.label(result1, return_num=True)
    region = measure.regionprops(liver_res)
    for i in xrange(num):
        box.append(region[i].area)

    label_num = box.index(max(box)) + 1
    liver_res[liver_res != label_num] = 0
    liver_res[liver_res == label_num] = 1

    #  preserve the largest liver
    liver_res = ndimage.binary_dilation(liver_res, iterations=1).astype(liver_res.dtype)
    box = []
    [liver_labels, num] = measure.label(liver_res, return_num=True)
    region = measure.regionprops(liver_labels)
    for i in xrange(num):
        box.append(region[i].area)
    label_num = box.index(max(box)) + 1
    liver_labels[liver_labels != label_num] = 0
    liver_labels[liver_labels == label_num] = 1
    liver_labels = ndimage.binary_fill_holes(liver_labels).astype(int)

    #  preserve tumor within ' largest liver' only
    lesions = lesions * liver_labels
    lesions = ndimage.binary_fill_holes(lesions).astype(int)
    liver_res = np.array(liver_res, dtype='uint8')
    liver_res = ndimage.binary_fill_holes(liver_res).astype(int)
    # liver_res[lesions == 1] = 2

    # transpose back and convert to float 64 for uint8 saving
    lesions = np.transpose(lesions, [2, 1, 0])
    liver_res = np.transpose(liver_res, [2, 1, 0])
    lesions = np.array(lesions, dtype='uint8')
    liver_res = np.array(liver_res, dtype='uint8')

    if latent_model:
        return liver_res, lesions, latent_features
    else:
        return liver_res, lesions
Exemplo n.º 55
0
def process(spath, t_path, conll_path, text_seed, k, prf_file):
    step_1(spath, t_path, text_seed, k)

    # step_2
    # train on the legal documents (x k) plus the CoNLL-2012 corpus
    step_2(t_path, conll_path, text_seed, k)

    # step_3

    # text_seed=2222
    train_file = 'train_%d.data' % (text_seed)
    test_file = 'test_%d.data' % (text_seed)
    train_documents = create_documents(train_file)
    print(len(train_documents))
    # print(train_documents[1].chars)
    # print(train_documents[1].labels)
    for doc in train_documents[:20]:
        print(doc.index, doc.chars, doc.labels)

    test_documents = create_documents(test_file)
    print(len(test_documents))

    # build the lexicon
    lexicon, lexicon_reverse = get_lexicon(train_documents + test_documents)
    print(len(lexicon), len(lexicon_reverse))

    embedding_model = gensim.models.Word2Vec.load(r'model_conll_law.m')
    embedding_size = embedding_model.vector_size
    print(embedding_size)

    # pre-trained word embeddings
    embedding_weights = create_embedding(embedding_model, embedding_size,
                                         lexicon_reverse)

    print(embedding_weights.shape)

    print(lexicon_reverse[1])
    print(embedding_weights[1])

    # 0 is reserved as the padding label
    label_2_index = {'B': 1, 'M': 2, 'E': 3, 'S': 4}
    index_2_label = {0: 'Pad', 1: 'B', 2: 'M', 3: 'E', 4: 'S'}

    train_data_list, train_label_list, train_index_list = create_matrix(
        train_documents, lexicon, label_2_index)
    test_data_list, test_label_list, test_index_list = create_matrix(
        test_documents, lexicon, label_2_index)
    print(len(train_data_list), len(train_label_list), len(train_index_list))
    print(len(test_data_list), len(test_label_list), len(test_index_list))
    print(train_data_list[1])
    print(train_label_list[1])
    # print(train_index_list[:20])
    print('-' * 15)

    max_len = max(map(len, train_data_list + test_data_list))
    print('max_len:', max_len)  # 128
    min_len = min(map(len, train_data_list + test_data_list))
    print('min_len:', min_len)

    # pad the sentences with leading zeros
    print(train_data_list[0])
    train_data_array, train_label_list_padding = padding_sentences(
        train_data_list, train_label_list, max_len)
    print(train_data_array.shape)
    print(train_data_array[0])

    print(test_data_list[0])
    test_data_array, test_label_list_padding = padding_sentences(
        test_data_list, test_label_list, max_len)
    print(test_data_array.shape)
    print(test_data_array[0])

    # label
    # print(train_label_list_padding[0])
    train_label_array=np_utils.to_categorical(train_label_list_padding,len(label_2_index)+1).\
         reshape((len(train_label_list_padding),len(train_label_list_padding[0]),-1))
    print(train_label_array.shape)

    # label
    # print(test_label_list_padding[0])
    test_label_array=np_utils.to_categorical(test_label_list_padding,len(label_2_index)+1).\
         reshape((len(test_label_list_padding),len(test_label_list_padding[0]),-1))
    print(test_label_array.shape)

    # model
    model = Bilstm_CNN_Crf(max_len, len(lexicon),
                           len(label_2_index) + 1, embedding_weights)

    model.summary()

    print(model.input_shape)
    print(model.output_shape)

    plot_model(model,
               to_file='bilstm_cnn_crf_model.png',
               show_shapes=True,
               show_layer_names=True)

    train_nums = len(train_data_array)
    train_array, val_array = train_data_array[:int(train_nums *
                                                   0.9)], train_data_array[
                                                       int(train_nums * 0.9):]
    train_label, val_label = train_label_array[:int(train_nums *
                                                    0.9)], train_label_array[
                                                        int(train_nums * 0.9):]

    print(train_array.shape, train_label.shape)
    print(val_array.shape, val_label.shape)
    print(test_data_array.shape, test_label_array.shape)

    checkpointer=ModelCheckpoint(filepath='best_val_model.hdf5',verbose=1,\
       save_best_only=True,monitor='val_loss',mode='auto')

    # train model

    hist=model.fit(train_array,train_label,batch_size=256,epochs=20,verbose=1,\
       validation_data=(val_array,val_label),callbacks=[checkpointer])

    print(hist.history['val_loss'])
    best_model_epoch = np.argmin(hist.history['val_loss'])
    print('best_model_epoch:', best_model_epoch)

    # visualise loss / accuracy
    # plot_acc(hist)
    # print(hist.history)

    model.load_weights('best_val_model.hdf5')

    test_y_pred = model.predict(test_data_array, batch_size=512, verbose=1)
    # print(test_y_pred)
    # predicted labels, e.g. [0, 0, ..., 1, 2, 3, 1]
    pred_label = np.argmax(test_y_pred, axis=2)
    print(pred_label[0])
    print(test_label_list_padding[0])

    K.clear_session()

    print(pred_label.shape, test_label_list_padding.shape)

    # generate the output documents
    real_text_list,pred_text_list,real_label_list,pred_label_list=create_pred_text(\
     lexicon_reverse,test_data_array,pred_label,test_label_list_padding,test_index_list)
    '''
	for r_text,p_text,r_label,p_label in zip(real_text_list,pred_text_list,real_label_list,pred_label_list):
		print(r_text)
		print([index_2_label[r] for r in r_label])
		print('-'*10)
		print(p_text)
		print([index_2_label[p] for p in p_label])
		print('='*20)
	'''

    # write the files
    write_2_file(real_text_list, pred_text_list)

    # score
    F = score.prf_score('real_text.txt', 'pred_text.txt', prf_file, text_seed,
                        best_model_epoch)

    # F_list.append([text_seed,F])

    return F
        val_writer_path = os.path.join(logdir, 'validation')
        val_writer = tf.summary.FileWriter(val_writer_path)
        predictions = model.predict(X,batch_size=1)
        for i in [0, 1, 10, 20]: #camera indices

            img = _plot_y_vs_pred_on_BytesIO(y[:, i], predictions[:, i])
            summary_pb = tf.Summary(value=[tf.Summary.Value(tag="camera_{}_prediction_vs_actual".format(i), image=img)])
            val_writer.add_summary(summary_pb)



        text_tensor = tf.make_tensor_proto([(k,str(v)) for k,v in params.items()], dtype=tf.string)
        meta = tf.SummaryMetadata()
        meta.plugin_data.plugin_name = "text"
        summary_pb = tf.Summary()
        summary_pb.value.add(tag="parameters", metadata=meta, tensor=text_tensor)
        val_writer.add_summary(summary_pb)
        val_writer.close()

        params["_ignore"] = True

        del model
        del history
        del prepare_data
        K.clear_session() #to hopefully prevent slow down after a few models..

    # Write the log file back with potential info about the run (including the _ignore
    # flag, which is set after a configuration has been run once).
    with open(paramss_path,'w') as f:
        json.dump(paramss,f)
Exemplo n.º 57
0
    def DetectLines(self):
        #         os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu)

        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        set_session(tf.Session(config=config))

        #         os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu)

        BigImg = self.folder + 'sat_original.png'
        BigLane = self.folder + 'final_marks.png'
        bigImg = imread(BigImg, -1)
        bigLane = imread(BigLane, -1) + self.lane_v2
        bigLane_gray = imread(BigLane, 0) + self.lane_v2[:, :, 0]

        bigLine = np.zeros_like((bigImg))

        checkpoint_name = "GAN_v4_label_normal_lanenet2/"
        model_name = 'lanenet2'
        checkpoint_dir_root = "/home/beemap/Documents/cesar-workspace/Segmentation/Keras_Code/keras3/checkpoints/"
        checkpoint_dir = checkpoint_dir_root + checkpoint_name
        print("Loading Model in .... ", checkpoint_dir)
        json_file = open(checkpoint_dir + model_name + ".json", 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights("%s/%s_weights_90.h5" %
                           (checkpoint_dir, model_name))

        for r in range(0, bigImg.shape[0], 256):
            for c in range(0, bigImg.shape[1], 256):
                img = bigImg[r:r + 256, c:c + 256, :3]
                #         h, w, _ = img.shape
                tmp = img[:]
                lane = bigLane[r:r + 256, c:c + 256, :3]
                img = np.float32(img) / 255.0
                lane = np.float32(lane) / 255.0

                #         input_image_gray = data_util.get_image(image,do_aug=[],gray=True, change=False)
                input_image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                w1, w2, w3, w4 = lanenet_wavelet(input_image_gray)
                w1 = np.expand_dims(w1, axis=0)
                w2 = np.expand_dims(w2, axis=0)
                w3 = np.expand_dims(w3, axis=0)
                w4 = np.expand_dims(w4, axis=0)
                lane_gray = bigLane_gray[r:r + 256, c:c + 256]

                hough = hough_lines(lane_gray,
                                    rho=1,
                                    min_line_len=10,
                                    threshold=40,
                                    max_line_gap=300) / 255.0
                mask = model.predict([
                    np.expand_dims(img, axis=0),
                    np.expand_dims(lane, axis=0), w1, w2, w3, w4
                ],
                                     batch_size=None,
                                     verbose=0,
                                     steps=None)

                mask = np.round(mask[0, :, :, 0]).astype(int)
                seg = np.zeros((256, 256, 3))

                seg[:, :, 0] += ((mask[:, :] == 1) * (255)).astype('uint8')
                seg[:, :, 1] += ((mask[:, :] == 1) * (255)).astype('uint8')
                seg[:, :, 2] += ((mask[:, :] == 1) * (255)).astype('uint8')

                bigLine[r:r + 256, c:c + 256, :] = seg
        K.clear_session()
        name = 'BigLine.png'
        new_dir = self.folder
        dest = new_dir + name
        cv2.imwrite(dest, bigLine)
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          learning_rate=0.0001,
          num_epochs=500,
          minibatch_size=32,
          print_cost=True,
          keep_prob=0.75,
          units=64):

    num_epochs = num_epochs + 1
    ops.reset_default_graph()
    K.clear_session()
    (m, n_x) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []
    L2penalty = 0.7 / 2

    X, Y = create_placeholders(n_x, n_y)
    # sets the number of nodes in each hidden layer:
    parameters = initialize_parameters(units)

    out = forward_propagation(X, parameters, units, keep_prob)
    cost = compute_cost(out, Y, parameters, (L2penalty / m))

    starter_learning_rate = learning_rate
    global_step = tf.Variable(0, trainable=False)
    end_learning_rate = learning_rate / 1000
    decay_steps = 1000
    learn_rate = tf.train.polynomial_decay(starter_learning_rate,
                                           global_step,
                                           decay_steps,
                                           end_learning_rate,
                                           power=0.5)
    optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        K.set_session(sess)
        sess.run(init)
        for epoch in range(num_epochs):

            epoch_cost = 0.  # Defines a cost related to an epoch
            num_minibatches = int(
                m / minibatch_size
            )  # number of minibatches of size minibatch_size in the train set
            minibatches = random_mini_batches(X_train.T, Y_train.T,
                                              minibatch_size)
            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                minibatch_X = minibatch_X.T
                minibatch_Y = minibatch_Y.T
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={
                                                 X: minibatch_X,
                                                 Y: minibatch_Y
                                             })
                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 1 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch == (num_epochs - 1):
                print("Final Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 1 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        #save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.round(tf.sigmoid(out)), Y)
        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        keep_prob = 1.0
        print("Train Accuracy:%", 100 * accuracy.eval({
            X: X_train,
            Y: Y_train
        }))
        print("Test Accuracy:%", 100 * accuracy.eval({X: X_test, Y: Y_test}))
        print(" ")

        return parameters
Exemplo n.º 59
0
def lstmnet(i):

    print('Fold ', i, ' Processing')
    X_build = train_df[train_df['CVindices'] != i]  # 636112
    #X_build = X_build.groupby('id').first().reset_index() # 331085
    X_val = train_df[train_df['CVindices'] == i]

    trainindex = train_df[train_df['CVindices'] != i].index.tolist()
    valindex = train_df[train_df['CVindices'] == i].index.tolist()

    data_1_train = data_1[trainindex]
    #    data_2_train = data_2[trainindex]
    train_data_reg_train = train_data_reg[trainindex]
    train_data_city_train = train_data_city[trainindex]
    train_data_cat1_train = train_data_cat1[trainindex]
    train_data_cat2_train = train_data_cat2[trainindex]
    train_data_prm1_train = train_data_prm1[trainindex]
    train_data_prm2_train = train_data_prm2[trainindex]
    #    train_data_prm3_train  = train_data_prm3[trainindex]
    #    train_data_sqnm_train  = train_data_sqnm[trainindex]
    train_data_usr_train = train_data_usr[trainindex]
    train_data_itype_train = train_data_itype[trainindex]

    train_user_features_train = train_user_features[trainindex]

    labels_train = labels[trainindex]

    data_1_val = data_1[valindex]
    #    data_2_val = data_2[valindex]

    train_data_reg_val = train_data_reg[valindex]
    train_data_city_val = train_data_city[valindex]
    train_data_cat1_val = train_data_cat1[valindex]
    train_data_cat2_val = train_data_cat2[valindex]
    train_data_prm1_val = train_data_prm1[valindex]
    train_data_prm2_val = train_data_prm2[valindex]
    #    train_data_prm3_val  = train_data_prm3[valindex]
    #    train_data_sqnm_val  = train_data_sqnm[valindex]
    train_data_usr_val = train_data_usr[valindex]
    train_data_itype_val = train_data_itype[valindex]
    train_user_features_val = train_user_features[valindex]

    labels_val = labels[valindex]

    pred_cv = np.zeros(X_val.shape[0])

    pred_test = np.zeros(test_df.shape[0])

    for j in range(1, nbags + 1):
        print('bag ', j, ' Processing')
        bag_cv = np.zeros(X_val.shape[0])

        model = gru_Bidirectional_selfEmbedding_model()
        #        model = gru_attention_model()

        callbacks = [
            EarlyStopping(monitor='val_loss',
                          patience=patience,
                          verbose=VERBOSEFLAG),
            ModelCheckpoint(MODEL_WEIGHTS_FILE,
                            monitor='val_loss',
                            save_best_only=True,
                            verbose=VERBOSEFLAG),
        ]

        #        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=patience, min_lr=1e-9, epsilon = 0.00001, verbose=VERBOSEFLAG)
        folds_nb_epoch = 10
        learning_rate = 1e-3
        #sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        if optim_type == 'SGD':
            optim = SGD(lr=learning_rate,
                        decay=1e-6,
                        momentum=0.9,
                        nesterov=True)
        else:
            optim = Adam(lr=learning_rate)
        model.compile(optimizer=optim,
                      loss='mean_squared_error',
                      metrics=[root_mean_squared_error])

        model.fit(
            [
                data_1_train, train_data_reg_train, train_data_city_train,
                train_data_cat1_train, train_data_cat2_train,
                train_data_prm1_train, train_data_prm2_train,
                train_data_usr_train, train_data_itype_train,
                train_user_features_train
            ],
            labels_train,
            validation_data=([
                data_1_val, train_data_reg_val, train_data_city_val,
                train_data_cat1_val, train_data_cat2_val, train_data_prm1_val,
                train_data_prm2_val, train_data_usr_val, train_data_itype_val,
                train_user_features_val
            ], labels_val),
            batch_size=batch_size,
            nb_epoch=folds_nb_epoch,
            callbacks=callbacks,  #[callbacks, reduce_lr],
            verbose=VERBOSEFLAG)
        bag_cv = model.predict([
            data_1_val, train_data_reg_val, train_data_city_val,
            train_data_cat1_val, train_data_cat2_val, train_data_prm1_val,
            train_data_prm2_val, train_data_usr_val, train_data_itype_val,
            train_user_features_val
        ],
                               batch_size=batch_size,
                               verbose=VERBOSEFLAG)[:, 0]
        pred_cv += model.predict([
            data_1_val, train_data_reg_val, train_data_city_val,
            train_data_cat1_val, train_data_cat2_val, train_data_prm1_val,
            train_data_prm2_val, train_data_usr_val, train_data_itype_val,
            train_user_features_val
        ],
                                 batch_size=batch_size,
                                 verbose=VERBOSEFLAG)[:, 0]

        pred_test += model.predict([
            test_data_1, test_data_reg, test_data_city, test_data_cat1,
            test_data_cat2, test_data_prm1, test_data_prm2, test_data_usr,
            test_data_itype, test_user_features
        ],
                                   batch_size=batch_size,
                                   verbose=VERBOSEFLAG)[:, 0]

        bag_score = np.sqrt(metrics.mean_squared_error(labels_val, bag_cv))
        print('bag ', j, '- rmse:', bag_score)
        del model
        K.clear_session()
        gc.collect()
    pred_cv /= nbags

    pred_test /= nbags
    fold_score = np.sqrt(metrics.mean_squared_error(labels_val, pred_cv))
    print('Fold ', i, '- rmse:', fold_score)
    pred_cv = pd.DataFrame(pred_cv)
    pred_cv.columns = ["deal_probability"]
    pred_cv["item_id"] = X_val.item_id.values
    pred_cv = pred_cv[['item_id', 'deal_probability']]
    pred_cv['deal_probability'] = pred_cv['deal_probability'].clip(0.0, 1.0)
    sub_valfile = inDir + '/submissions/Prav.nn10.fold' + str(i) + '.csv'
    pred_cv.to_csv(sub_valfile, index=False)

    pred_test = pd.DataFrame(pred_test)
    pred_test.columns = ["deal_probability"]
    pred_test["item_id"] = test_df.item_id.values
    pred_test = pred_test[['item_id', 'deal_probability']]
    pred_test['deal_probability'] = pred_test['deal_probability'].clip(
        0.0, 1.0)
    sub_file = inDir + '/submissions/Prav.nn10.fold' + str(
        i) + '-test' + '.csv'
    pred_test.to_csv(sub_file, index=False)
    del pred_cv
    del pred_test
Exemplo n.º 60
0
def clear_session():
    K.clear_session()
    gc.collect()