Ejemplo n.º 1
0
def deactive_train(json_str):
    """Handle a 'deactive_train' command: tear down the Keras/TF session,
    try to drop global model references, and notify the client via Socket.IO.

    Parameters
    ----------
    json_str: command string from the client; only the exact value
        'deactive_train' triggers the reset.
    """
    data = json_str
    if data == 'deactive_train':
        ######################
        # Close the live session and let Keras create a fresh one.
        sess = get_session()
        clear_session()
        sess.close()
        sess = get_session()
        # NOTE(review): these names are never assigned in this function, so
        # `del` raises here and the bare except swallows it — the module-level
        # objects are not actually removed.  Verify against module scope.
        try:
            del my_detector
            del multipeople_classifier
            del multiperson_tracker
        except:
            pass
        print(gc.collect(
        ))  # if it's done something you should see a number being outputted

        # use the same config as you used to create the session
        config = tensorflow.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 1
        config.gpu_options.visible_device_list = "0"
        set_session(tensorflow.Session(config=config))
        # NOTE(review): a second, differently-configured session is created
        # right after set_session above; the two configs conflict — confirm
        # which configuration is actually intended.
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        ######################
        print(60 * 'k')
        repliesmess = "done_deactive"
        print(repliesmess)
        socketio.emit('done_deactive_train', data=repliesmess)
Ejemplo n.º 2
0
def main(uploadfile=""):
    """Run the color-conversion model on one image and display the result.

    Parameters
    ----------
    uploadfile: path of the image file to process.

    Raises
    ------
    NameError: if no file was supplied (original exception type kept for
        backward compatibility).
    """
    # Treat both None and the empty-string default as "no file selected".
    # The original only checked `is None`, so the default "" slipped past
    # the guard and crashed later when opening the file.
    if not uploadfile:
        raise NameError("写真が選択されていません")
    filepath = uploadfile

    # Load the image once via Keras.  (The original also did
    # np.array(Image.open(filepath)) whose result was immediately
    # overwritten — dead work, removed.)
    image = load_img(filepath, target_size=(512, 512))
    image = image.convert("L")  # grayscale input for the model
    image = img_to_array(image)
    x_test = np.array([image / 255.])  # normalize to [0, 1] and add batch axis

    # Restore the trained model (architecture JSON + HDF5 weights).
    model = model_from_json(open('model_convertcolor', 'r').read())
    model.load_weights('model_convertcolor.hdf5')
    encoded_imgs = model.predict(x_test)
    plt.imshow(encoded_imgs[0].reshape(512, 512, 3))
    plt.show()

    # Explicitly clear the Keras session when evaluation finishes.
    backend.clear_session()
Ejemplo n.º 3
0
def reset_keras():
    """Tear down the current TF/Keras session and force a GC pass."""
    session = get_session()
    clear_session()
    session.close()

    # A nonzero collect() count means the GC actually freed something.
    print("\nGarbage Collector: ", gc.collect())
Ejemplo n.º 4
0
def reset_keras(model):
    """
    Resets keras session
    Parameters
    ----------
    model: Model to clear

    Returns
    -------
    None
    """
    # Close the live session and let Keras rebuild a fresh one.
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    # `model` is a bound parameter here, so `del` cannot raise — the original
    # bare try/except around it was dead code.  NOTE(review): this only drops
    # the local reference; the caller's reference still keeps the model alive.
    del model

    # use the same config as you used to create the session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras
Ejemplo n.º 5
0
def reset_keras():
    """Resets a Keras session and clears memory."""

    old_session = get_session()
    clear_session()
    old_session.close()
    get_session()

    # Best-effort removal of lingering global model references; the bare
    # except absorbs the failure when a name is not bound here.
    try:
        del network  # this is from global space - change this as you need
    except:
        pass

    try:
        del network_model  # this is from global space - change this as you need
    except:
        pass

    # A nonzero result means the garbage collector actually freed objects.
    print(gc.collect())

    # Recreate the session with the same GPU configuration as before.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory
    config.gpu_options.visible_device_list = '0'
    set_session(tf.Session(config=config))
Ejemplo n.º 6
0
def run_trial(_id, hyperparams, optimizer_options):
    """Train one model for trial `_id`, checkpointing the best weights and
    persisting the training history to the trial directory."""
    trial_dir = create_trial_dir(_id)
    model_path = os.path.join(trial_dir, 'model {}.h5'.format(_id))
    checkpoint = ModelCheckpoint(model_path, save_best_only=True)

    # Build the optimizer; SGD additionally takes a momentum term.
    opt_name = optimizer_options.get('type')
    learning_rate = optimizer_options.get('learning_rate')
    if opt_name == 'sgd':
        optimizer = optimizers.get(opt_name)(learning_rate,
                                             optimizer_options.get('momentum'))
    else:
        optimizer = optimizers.get(opt_name)(learning_rate)

    model = get_model()
    model.compile(optimizer=optimizer, loss=hyperparams.get('loss'))

    print('Start Training for Model {}'.format(_id))
    model.fit_generator(train_generator,
                        epochs=hyperparams.get('epochs'),
                        validation_data=valid_generator,
                        callbacks=[checkpoint])

    # Log a summary and pickle the raw history next to the checkpoint.
    history = model.history.history
    log_summary(_id, history)
    history_path = os.path.join(trial_dir, 'history {}'.format(_id))
    with open(history_path, 'wb') as handler:
        pickle.dump(history, handler)

    clear_session()
    print('End Training for Model {} Successfully'.format(_id))
Ejemplo n.º 7
0
 def __init__(self,latent_dim=100,im_dim=28,epochs=100,batch_size=256,learning_rate=0.0004,
              g_factor=0.25,droprate=0.25,momentum=0.8,alpha=0.2,saving_rate=10):
     """Build a GAN: a compiled discriminator plus a combined
     generator->discriminator model trained with the discriminator frozen.

     latent_dim: size of the generator's noise input.
     im_dim: image side length (presumably square images — TODO confirm).
     epochs, batch_size, learning_rate: training hyperparameters.
     g_factor: the generator's LR is learning_rate * g_factor.
     droprate, momentum, alpha: layer hyperparameters for the builders.
     saving_rate: stored for use elsewhere (checkpoint cadence — confirm).
     """
     # define and store local variables
     clear_session()
     self.latent_dim = latent_dim
     self.im_dim = im_dim
     self.epochs = epochs
     self.batch_size = batch_size
     self.learning_rate = learning_rate
     self.g_factor = g_factor
     # Separate optimizers: the generator trains at a scaled-down rate.
     self.optimizer_d = Adam(self.learning_rate)
     self.optimizer_g = Adam(self.learning_rate*self.g_factor)
     self.droprate = droprate
     self.momentum = momentum
     self.alpha = alpha
     self.saving_rate = saving_rate
     # define and compile discriminator
     self.discriminator = self.getDiscriminator(self.im_dim,self.droprate,self.momentum,
                                                self.alpha)
     self.discriminator.compile(loss=['binary_crossentropy'], optimizer=self.optimizer_d,
         metrics=['accuracy'])
     # define generator
     self.generator = self.getGenerator(self.latent_dim,self.momentum)
     # Freeze the discriminator inside the combined model so generator
     # updates do not also update the discriminator.
     self.discriminator.trainable = False
     # define combined network with partial gradient application
     z = Input(shape=(self.latent_dim,))
     img = self.generator(z)
     validity = self.discriminator(img)
     self.combined = Model(z, validity)
     self.combined.compile(loss=['binary_crossentropy'], optimizer=self.optimizer_g,
                           metrics=['accuracy'])
Ejemplo n.º 8
0
def infer():
    """ Given a model handle and input values, this def runs the model inference graph and returns the predictions.
  Args: Model handle, input values.
  Returns: A JSON containing all the model predictions.
  """
    # dict-style .iteritems() is Python 2 only; .items() works on the
    # request forms mapping under Python 3 as well.
    args = {k: v for k, v in request.forms.items()}
    print(args)
    clear_session()  # Clears TF graphs.
    clear_thread_cache(
    )  # We need to clear keras models since graph is deleted.
    try:
        model = get_model(args['handle'])
    except Exception as e:
        return json.dumps({
            'status': 'ERROR',
            'why': 'Infer: Model probably not found ' + str(e)
        })

    if 'values' not in args:
        return json.dumps({'status': 'ERROR', 'why': 'No values specified'})
    print(args['handle'])
    print(args['values'])
    outputs = model.infer(json.loads(args['values']))
    return json.dumps({'status': 'OK', 'result': outputs})
Ejemplo n.º 9
0
 def close():
     """Clear the Keras TensorFlow session.

     No-op for non-TensorFlow backends; imports are done lazily so the
     function is safe to call even when TF is not in use.
     """
     from keras import backend as K
     if K.backend() == 'tensorflow':
         import keras.backend.tensorflow_backend as tfb
         tfb.clear_session()
         # tfb.get_session().close()
         logger.info('tensorflow session clear')
Ejemplo n.º 10
0
 def __init__(self,num_classes,latent_dim=100,im_dim=28,epochs=100,batch_size=256,
              learning_rate=0.0004,g_factor=0.25,droprate=0.25,momentum=0.8,alpha=0.2,saving_rate=10):
     """Build a conditional GAN: a compiled discriminator plus a combined
     (noise, label) -> discriminator model trained with the discriminator
     frozen.

     num_classes: number of condition labels.
     latent_dim: size of the generator's noise input.
     im_dim: image side length (presumably square images — TODO confirm).
     epochs, batch_size, learning_rate: training hyperparameters.
     g_factor: the generator's LR is learning_rate * g_factor.
     droprate, momentum, alpha: layer hyperparameters for the builders.
     saving_rate: stored for use elsewhere (checkpoint cadence — confirm).
     """
     # define and store local variables
     clear_session()
     self.num_classes = num_classes
     self.latent_dim = latent_dim
     self.im_dim = im_dim
     self.epochs = epochs
     self.batch_size = batch_size
     self.learning_rate = learning_rate
     self.g_factor = g_factor
     # Separate optimizers: the generator trains at a scaled-down rate.
     self.optimizer_d = Adam(self.learning_rate)
     self.optimizer_g = Adam(self.learning_rate*self.g_factor)
     self.droprate = droprate
     self.momentum = momentum
     self.alpha = alpha
     self.saving_rate = saving_rate
     # define and compile discriminator
     self.discriminator = self.getDiscriminator(self.im_dim,self.droprate,self.momentum,
                                                self.alpha,self.num_classes)
     self.discriminator.compile(loss=['binary_crossentropy'], optimizer=self.optimizer_d)
     # define generator
     self.generator = self.getGenerator(self.latent_dim,self.momentum,
                                        self.alpha,self.num_classes)
     # Freeze the discriminator inside the combined model so generator
     # updates do not also update the discriminator.
     self.discriminator.trainable = False
     # define combined network with partial gradient application
     noise = Input(shape=(self.latent_dim,))
     label = Input(shape=(1,),dtype="int32")
     img = self.generator([noise, label])
     validity = self.discriminator([img,label])
     self.combined = Model([noise,label], validity)
     self.combined.compile(loss=['binary_crossentropy'], optimizer=self.optimizer_g)
Ejemplo n.º 11
0
def generate(conf_path, n, epoch, prefix_words, ignore_words):
    """Generate the n-best thread titles from a trained model.

    Parameters
    ----------
    conf_path: path to the YAML config (tokenizer and model settings).
    n: number of candidate titles to generate.
    epoch: which saved epoch's weights to load.
    prefix_words: tokens the generated title must start with.
    ignore_words: tokens excluded from generation (plus the UNK index).
    """
    with open(conf_path) as f:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the config file.
        conf = yaml.safe_load(f)

    print("== initialize tokenizer ==")
    token_files = glob.glob(conf["input_token_files"])
    tokenizer = create_tokenizer(token_files, num_words=conf["num_vocab"])
    print("output vocab size:", tokenizer.num_words)
    print("| + <UNK> token")
    # Reverse mapping: token index -> word, for making output readable.
    inverse_vocab = {idx: w for w, idx in tokenizer.word_index.items()}

    print("load model")
    print("> create instance")
    model = ThreadTitleGenerator(**conf["model_params"])
    print("> load model")
    model.load(conf["model_path"], epoch)
    print("> print summary")
    model.print_summary()
    print("generate words!")
    end_token_idx = tokenizer.word_index[END_TOKEN]
    prefix_tokens = [
        tokenizer.word_index[t] for t in [START_TOKEN] + prefix_words
    ]
    ignore_idx = [tokenizer.word_index[t] for t in ignore_words] \
                  + [conf["num_vocab"] + 1]  # unk_idx
    ret = model.gen_nbest(prefix_tokens, end_token_idx, ignore_idx, n=n)

    print(ret)

    print("convert to readable tokens")
    for tokens, prob in ret:
        title = [inverse_vocab.get(idx, "???") for idx in tokens]
        print(" ".join(title))
        print(prob)
    K.clear_session()
Ejemplo n.º 12
0
def main():
    """Run ITERATIONS evaluation rounds, collecting F-score/PR/ROC results,
    pickling the plot data each round, then writing summary values."""
    fscores = []
    prs = []
    rocs = []
    with Parallel(n_jobs=1, verbose=15,
                  backend='multiprocessing') as parallel_pool:
        for index in range(ITERATIONS):
            # Fresh TF session per iteration so graphs don't accumulate.
            keras_backend.clear_session()
            keras_session = tensorflow.Session()
            keras_backend.set_session(keras_session)

            print('ITERATION #%s' % str(index + 1))
            pr, roc, fscore = fcnhface(args, parallel_pool)
            fscores.append(fscore)
            prs.append(pr)
            rocs.append(roc)

            # pickle writes bytes: the file must be opened in binary mode
            # ('wb'); the original text mode ('w') raises TypeError on
            # Python 3.
            with open('./files/plot_' + OUTPUT_NAME + '.file', 'wb') as outfile:
                pickle.dump([prs, rocs], outfile)

            plot_precision_recall(prs, OUTPUT_NAME)
            plot_roc_curve(rocs, OUTPUT_NAME)

    means = mean_results(fscores)
    with open('./values/' + OUTPUT_NAME + '.txt', 'a') as outvalue:
        for item in fscores:
            outvalue.write(str(item) + '\n')
        for item in means:
            outvalue.write(str(item) + '\n')
    print(fscores)
Ejemplo n.º 13
0
def faultPredictor():
    """Predict fault classes for a random slice of the cutout test set.

    Returns
    -------
    JSON string of [prediction, actual] pairs on success, or an Exception
    instance on failure.  NOTE(review): returning (not raising) the
    Exception is almost certainly unintended — confirm against call sites
    before changing it.
    """
    try:
        from random import randint

        dataframe = pd.read_csv(
            "./api/ml_operations/pickles/random_cutout_test.csv")
        dataset = dataframe.values

        # Evaluate on a random-length prefix of 1..32 rows.
        rand = randint(1, 32)
        X = dataset[:rand, 0:200].astype(float)
        y = dataset[:rand, -1].astype(int)

        model = getModel(settings.model_path)
        x_normed = normalize_input(X, settings.path_x_normalizer)

        y_pred = model.predict_classes(x_normed)

        # Pair each prediction with its ground-truth label, column-wise.
        y_pred_actual = np.stack((y_pred, y), axis=1)

        predictions, actuals = denormalize_prediction(
            y_pred_actual, settings.path_y_normalizer)

        combine_outputs = np.stack((predictions, actuals), axis=1)
        output_vals = json.dumps(combine_outputs.tolist())

    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate.
    except Exception:

        return Exception("Something Went Wrong")

    from keras.backend.tensorflow_backend import clear_session
    clear_session()

    return output_vals
def train():
    """Two-stage training of a VGG19+GRU action classifier on PennAction.

    Stage 1 trains the GRU component (VGG19 frozen); stage 2 reloads the
    best checkpoint, unfreezes the top two conv layers of VGG19 block 5,
    and fine-tunes with the checkpointed learning rate halved.
    """
    global args
    args = parser.parse_args()
    print(args)

    train_videos = PennAction(frames_path='data/PennAction/train/frames/',
                              labels_path='data/PennAction/train/labels',
                              batch_size=args.batch_size,
                              num_frames_sampled=args.num_frames_sampled)
    valid_videos = PennAction(frames_path='data/PennAction/validation/frames',
                              labels_path='data/PennAction/validation/labels',
                              batch_size=args.batch_size,
                              num_frames_sampled=args.num_frames_sampled,
                              shuffle=False)

    # LR decay and best-model checkpointing, both keyed on validation accuracy.
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=np.sqrt(0.1),
                                  patience=5,
                                  verbose=1)
    save_best = ModelCheckpoint(args.filepath,
                                monitor='val_acc',
                                verbose=1,
                                save_best_only=True,
                                mode='max')
    callbacks = [save_best, reduce_lr]

    # Resume from an existing checkpoint if present; otherwise build afresh.
    if os.path.exists(args.filepath):
        model = load_model(args.filepath)
    else:
        model = VGG19_GRU(frames_input_shape=(args.num_frames_sampled, 224,
                                              224, 3),
                          poses_input_shape=(args.num_frames_sampled, 26),
                          classes=len(train_videos.labels))
        model.compile(optimizer=Adam(lr=args.train_lr, decay=1e-5),
                      loss='categorical_crossentropy',
                      metrics=['acc'])
    print('Train the GRU component only')
    model.fit_generator(generator=train_videos,
                        epochs=args.epochs,
                        callbacks=callbacks,
                        workers=args.num_workers,
                        validation_data=valid_videos)

    # Clear session to avoid exhausting GPU's memory
    # Reload the model architecture and weights, unfreeze the last 2 convolutional layers in VGG19's block 5
    # Recompile the model with halved learning rate (from the last checkpoint)
    K.clear_session()
    model = load_model(args.filepath)
    # NOTE(review): indices -9/-10 are assumed to be the top two conv layers
    # of VGG19 block 5 — confirm against the model definition.
    model.layers[-9].trainable = True
    model.layers[-10].trainable = True
    model.compile(optimizer=Adam(lr=K.get_value(model.optimizer.lr) * 0.5,
                                 decay=1e-5),
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    print('Fine-tune top 2 convolutional layers of VGG19')
    model.fit_generator(generator=train_videos,
                        epochs=args.epochs,
                        callbacks=callbacks,
                        workers=args.num_workers,
                        validation_data=valid_videos)
Ejemplo n.º 15
0
def predict(model, files, x_test, y_test):
    """Print each file's predicted class and the model's overall accuracy."""
    predicted = model.predict_classes(x_test)
    evaluation = model.evaluate(x_test, y_test)
    backend.clear_session()
    for label, filename in zip(predicted, files):
        print('{}: {}'.format(filename, label))
    print('Accuracy: {}%'.format(evaluation[1] * 100))
def reset_keras():
    """Close the current Keras session and install fresh GPU-backed ones."""
    old = get_session()
    clear_session()
    old.close()
    get_session()

    # Best-effort removal of the global classifier reference; the bare
    # except absorbs the failure when the name is not bound here.
    try:
        del classifier
    except:
        pass

    ###################################
    # TensorFlow wizardry: first session grows GPU memory on demand
    # instead of pre-allocating it all.
    growth_config = tensorflow.ConfigProto()
    growth_config.gpu_options.allow_growth = True
    K.tensorflow_backend.set_session(tensorflow.Session(config=growth_config))
    print("available gpu divice: {}".format(tensorflow.test.gpu_device_name()))

    # Second session: pin to GPU 0 with the full memory fraction, matching
    # the configuration used when the session was originally created.
    pinned_config = tensorflow.ConfigProto()
    pinned_config.gpu_options.per_process_gpu_memory_fraction = 1
    pinned_config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=pinned_config))
Ejemplo n.º 17
0
def reset_keras():
    """Release the Keras session and close the underlying TF session."""
    current = get_session()
    clear_session()
    current.close()
Ejemplo n.º 18
0
def detect(upload_image):
    """Detect faces in an uploaded image, classify each with the Keras model,
    annotate the image, and return (result_list, result_name, base64 PNG).
    """
    result_name = upload_image.name
    result_list = []
    result_img = ''

    cascade_file_path = settings.CASCADE_FILE_PATH
    model_file_path = settings.MODEL_FILE_PATH
    #model = keras.models.load_model(model_file_path)
    model = tf.keras.models.load_model(model_file_path)

    image = np.asarray(Image.open(upload_image))

    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_gs = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
    # 1) create the CascadeClassifier used for face detection
    cascade = cv2.CascadeClassifier(cascade_file_path)
    # 2) run OpenCV face detection -> detectMultiScale()
    faces = cascade.detectMultiScale(image_gs,
                                     scaleFactor=1.1,
                                     minNeighbors=5,
                                     minSize=(64, 64))

    # 3) were any faces detected?
    if len(faces) > 0:
        count = 1
        for (xpos, ypos, width, height) in faces:
            face_image = image_rgb[ypos:ypos + height, xpos:xpos + width]

            # 4) ignore faces smaller than 64 px
            if face_image.shape[0] < 64 or face_image.shape[1] < 64:
                continue
            # 5) resize larger faces down to 64x64
            face_image = cv2.resize(face_image, (64, 64))

            # 6) draw a rectangle around the face
            cv2.rectangle(image_rgb, (xpos, ypos),
                          (xpos + width, ypos + height), (255, 0, 0),
                          thickness=2)
            face_image = np.expand_dims(face_image, axis=0)
            name, result = detect_who(model, face_image)

            # 7) write the recognized name below the face
            cv2.putText(image_rgb, name, (xpos, ypos + height + 20),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (255, 0, 0), 2)
            result_list.append(result)
            count = count + 1

    # 8) encode the annotated image as a PNG
    is_success, img_buffer = cv2.imencode(".png", image_rgb)
    if is_success:
        # image -> binary form in main memory, then base64 for transport
        io_buffer = io.BytesIO(img_buffer)
        result_img = base64.b64encode(io_buffer.getvalue()).decode().replace(
            "'", "")

    # 9) work around TensorFlow sessions not being closed
    backend.clear_session()

    return (result_list, result_name, result_img)
Ejemplo n.º 19
0
def reset_keras():
    """Recreate the default TensorFlow session for Keras from scratch."""
    stale = get_session()
    clear_session()
    stale.close()

    # Rebuild with a default config (soft placement / device logging could
    # be passed to ConfigProto here if needed).
    set_session(tf.Session(config=tf.ConfigProto()))
Ejemplo n.º 20
0
def reset_keras():
    """Reset the Keras backend and install a memory-growth TF session."""
    stale = get_session()
    clear_session()
    stale.close()
    get_session()

    # Allocate GPU memory on demand rather than all at once.
    growth = tf.ConfigProto()
    growth.gpu_options.allow_growth = True
    set_session(tf.Session(config=growth))
    print("Keras backend has been reset")
def main():
    """Train on the 3-object dataset, evaluate, then clear the session."""
    X_train, X_test, y_train, y_test = np.load("./image/3obj.npy")

    # Pixel values -> [0, 1); labels -> one-hot vectors.
    X_train, X_test = (split.astype("float") / 256
                       for split in (X_train, X_test))
    y_train, y_test = (np_utils.to_categorical(split, num_classes)
                       for split in (y_train, y_test))

    model_eval(model_train(X_train, y_train, X_test, y_test), X_test, y_test)
    backend.clear_session()
Ejemplo n.º 22
0
def reset_keras():
    """Clear the Keras session, drop the global classifier, and run the GC."""
    clear_session()

    # Best-effort removal of the global model reference; adjust the name
    # for your own globals.
    try:
        del classifier
    except:
        pass

    # A nonzero count means the collector actually released something.
    print(gc.collect())
Ejemplo n.º 23
0
def reset_memory():
    """Close the live TF session, install a fresh default one, and run GC."""
    stale = get_session()
    clear_session()
    stale.close()

    # Default config; soft placement / device logging could be enabled here.
    set_session(tf.Session(config=tf.ConfigProto()))
    print("clean memory", gc.collect())
Ejemplo n.º 24
0
def reset_keras():
    """Reset the Keras session and rebind a GPU-pinned TensorFlow session."""
    stale = get_session()
    clear_session()
    stale.close()

    # Pin to GPU 0 with the full per-process memory fraction.
    gpu_config = tensorflow.ConfigProto()
    gpu_config.gpu_options.per_process_gpu_memory_fraction = 1
    gpu_config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=gpu_config))
Ejemplo n.º 25
0
def reset_keras(classifier):
    """Close and recreate the Keras backend session, dropping the local
    reference to `classifier`.

    NOTE(review): deleting the parameter only removes this function's
    reference; the caller's reference keeps the model alive.
    """
    sess = K.get_session()
    clear_session()
    sess.close()
    sess = K.get_session()

    # `classifier` is a bound parameter, so `del` cannot raise — the
    # original bare try/except around it was dead code.
    del classifier
Ejemplo n.º 26
0
    def load_model(self):
        """Load a model from a provided path.

        Clears the TF session first, then loads the latest saved model and
        captures the default graph.  Failures are reported, not raised.
        """
        try:
            tensorflow_backend.clear_session()
            # The original called _find_latest_model_path() twice, discarding
            # the first result — a single call is sufficient.
            self.model = load_model(self._find_latest_model_path())
            self.graph = tf.get_default_graph()

        except Exception as e:
            print('Could not load model:', str(e))
Ejemplo n.º 27
0
def do_train():
    """K-fold training loop: per fold, create a fresh TF session, train the
    question-generation model with adversarial training, and checkpoint the
    best weights (by validation ROUGE-L) for that fold.
    """
    data = load_data('../data/round1_train_0907.json')  # load the data

    # cross-validation
    kf = KFold(n_splits=n, shuffle=True, random_state=SEED)
    for fold, (trn_idx, val_idx) in enumerate(kf.split(data), 1):
        print(f'Fold {fold}')

        # configure the TensorFlow session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # don't grab all GPU memory; allocate on demand
        sess = tf.Session(config=config)
        KTF.set_session(sess)

        # split into training and validation sets
        train_data = [data[i] for i in trn_idx]
        valid_data = [data[i] for i in val_idx]

        train_generator = data_generator(train_data, batch_size, random=True)

        model, train_model = build_model()  # build the model

        adversarial_training(train_model, 'Embedding-Token', 0.5)  # adversarial training

        # question generator (decodes until the '?' token)
        qg = QuestionGeneration(model,
                                start_id=None,
                                end_id=tokenizer._token_dict['?'],
                                maxlen=max_q_len)

        # set up callbacks: per-fold evaluation, early stopping, and
        # best-weights checkpointing on validation ROUGE-L
        callbacks = [
            Evaluator(valid_data, qg),
            EarlyStopping(monitor='val_rouge_l',
                          patience=1,
                          verbose=1,
                          mode='max'),
            ModelCheckpoint(f'../user_data/model_data/fold-{fold}.h5',
                            monitor='val_rouge_l',
                            save_weights_only=True,
                            save_best_only=True,
                            verbose=1,
                            mode='max'),
        ]

        # train the model
        train_model.fit_generator(
            train_generator.forfit(),
            steps_per_epoch=len(train_generator),
            epochs=epochs,
            callbacks=callbacks,
        )

        # release the per-fold session so graphs don't leak across folds
        KTF.clear_session()
        sess.close()
Ejemplo n.º 28
0
def detect(upload_image):
    """Detect faces in an uploaded image, identify each with the model,
    annotate the image, and return (result_list, result_name, base64 PNG).
    """
    result_name = upload_image.name
    result_list = []
    result_img = ''

    cascade_file_path = settings.CASCADE_FILE_PATH
    model_file_path = settings.MODEL_FILE_PATH

    model = keras.models.load_model(model_file_path)

    image = np.asarray(Image.open(upload_image))
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_gs = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)

    cascade = cv2.CascadeClassifier(cascade_file_path)
    face_list = cascade.detectMultiScale(image_gs,
                                         scaleFactor=1.11,
                                         minNeighbors=5,
                                         minSize=(64, 64))

    # if at least one face was detected
    if len(face_list) > 0:
        count = 1
        for (xpos, ypos, width, height) in face_list:
            # crop the detected face
            face_image = image_rgb[ypos:ypos + height, xpos:xpos + width]
            if face_image.shape[0] < 64 or face_image.shape[1] < 64:
                continue
            # shrink the detected face to the model's input size
            face_image = cv2.resize(face_image, (64, 64))
            # draw a box around the detected face
            cv2.rectangle(image_rgb, (xpos, ypos),
                          (xpos + width, ypos + height), (0, 0, 255),
                          thickness=2)

            # wrap the face in a batch containing a single image
            face_image = np.expand_dims(face_image, axis=0)
            # identify the person from the detected face
            name, result = detect_who(model, face_image)
            # draw the name next to the detected face
            cv2.putText(image_rgb, f"{count}. {name}",
                        (xpos, ypos + height + 20), cv2.FONT_HERSHEY_DUPLEX, 1,
                        (0, 0, 255), 2)
            # store the result in the list
            result_list.append(result)
            count = count + 1

    is_success, img_buffer = cv2.imencode(".png", image_rgb)
    if is_success:
        io_buffer = io.BytesIO(img_buffer)
        result_img = base64.b64encode(io_buffer.getvalue()).decode().replace(
            "'", "")

    backend.clear_session()
    return (result_list, result_name, result_img)
Ejemplo n.º 29
0
	def returnProbabilities(self,pred_probs):
		"""Populate self.final_probabilities with class-name -> probability
		for the 10 classes of the ensemble label encoder.

		pred_probs: one feature vector; wrapped into a batch of size 1.
		NOTE(review): encoder classes are loaded from a hard-coded absolute
		path — confirm this is valid for the deployment environment.
		"""
		encoder=LabelEncoder()
		encoder.classes_ = numpy.load('/home/ccenter/new/17-02-2017_clone/BE/Data/Models/Model1/ensemble.npy')
		print(pred_probs)
		probs = self.model.predict_proba(np.array([pred_probs]),verbose=1)
		print(probs)
		# Map each of the 10 class indices back to its label name.
		for i in range (10):
			self.final_probabilities[encoder.inverse_transform(i)]=probs[0][i]
		print(self.final_probabilities)
		
		clear_session()
Ejemplo n.º 30
0
def get_embedding(image_file):
    """Return the 'embeddings'-layer activation of the model for one image.

    The model is built and run on the CPU, weights are loaded from the
    module-level `model_path`, and the Keras session is cleared afterwards
    so repeated calls do not accumulate graphs.
    """
    img = readImg(image_file)
    with K.tf.device('/cpu:0'):
        input_image = Input(shape=image_shape + (1, ))
        c_ae = model(None, input_image)
        c_ae.load_weights(os.path.join(os.getcwd(), model_path))
        # Expose the intermediate 'embeddings' layer as the model output.
        embd_model = Model(inputs=c_ae.input,
                           outputs=c_ae.get_layer('embeddings').output)
        embd_value = embd_model.predict(img)
    K.clear_session()
    return embd_value
Ejemplo n.º 31
0
    def __init__(self, input_shape, encoding_dim=512, load_path=None, logger=None):
        """Build a small convolutional classifier/encoder network.

        input_shape: input tensor shape, channels-first ('th' ordering).
        encoding_dim: size of the dense feature layer exposed by `encoder`.
        load_path: optional saved-model path to restore weights from.
        logger: optional logger; if set, the architecture JSON is saved to
            `logger.path`.

        NOTE(review): uses the Keras 1 API (Convolution2D, border_mode,
        subsample, Model(input=..., output=...)).
        """
        b.clear_session()  # To avoid memory leaks when instantiating the network in a loop
        self.dim_ordering = "th"  # (samples, filters, rows, cols)
        self.input_shape = input_shape
        self.encoding_dim = encoding_dim
        self.dropout_prob = 0.5
        self.logger = logger

        # Build network
        self.input = Input(shape=self.input_shape)

        # Three conv layers with decreasing receptive field / stride.
        self.hidden = Convolution2D(
            32, 8, 8, border_mode="valid", activation="relu", subsample=(4, 4), dim_ordering="th"
        )(self.input)
        self.hidden = Convolution2D(
            64, 4, 4, border_mode="valid", activation="relu", subsample=(2, 2), dim_ordering="th"
        )(self.hidden)
        self.hidden = Convolution2D(
            64, 3, 3, border_mode="valid", activation="relu", subsample=(1, 1), dim_ordering="th"
        )(self.hidden)

        self.hidden = Flatten()(self.hidden)
        self.features = Dense(self.encoding_dim, activation="relu")(self.hidden)
        self.output = Dense(1, activation="sigmoid")(self.features)

        # Models: full classifier and a feature-extractor sharing its layers.
        self.model = Model(input=self.input, output=self.output)
        self.encoder = Model(input=self.input, output=self.features)

        # Optimization algorithm: prefer Adam, fall back to RMSprop when
        # Adam is not imported in this environment.
        try:
            self.optimizer = Adam()
        except NameError:
            self.optimizer = RMSprop()

        # Load the network from saved model
        if load_path is not None:
            self.load(load_path)

        self.model.compile(optimizer=self.optimizer, loss="mse", metrics=["accuracy"])

        # Save the architecture
        if self.logger is not None:
            with open(self.logger.path + "architecture.json", "w") as f:
                f.write(self.model.to_json())
                f.close()
Ejemplo n.º 32
0
    def __init__(self, input_shape, encoding_dim=49, load_path=None, logger=None):
        """Build a dense (contractive) autoencoder with a symmetric
        encoder/decoder stack of LeakyReLU layers.

        input_shape: input tensor shape; layer widths are derived from
            input_shape[0].
        encoding_dim: bottleneck ('encoded') layer size.
        load_path: optional saved-model path to restore weights from.
        logger: optional logger; if set, the architecture JSON is saved to
            `logger.path`.

        NOTE(review): layer sizes use `input_shape[0] / 16` etc. — plain `/`
        is true division on Python 3 and yields floats; this code appears to
        target Python 2 / Keras 1 (W_regularizer, Model(input=..., output=...)).
        Confirm before running under Python 3.
        """
        b.clear_session()  # To avoid memory leaks when instantiating the network in a loop
        self.dim_ordering = 'th'  # (samples, filters, rows, cols)
        self.input_shape = input_shape
        self.encoding_dim = encoding_dim
        # Decoder sub-model construction is disabled by default.
        self.decoding_available = False
        self.dropout_prob = 0.5
        self.logger = logger

        # Build network
        # (The block below is a retired convolutional variant, kept for reference.)
        """
        self.inputs = Input(shape=self.input_shape)

        # Encoding layers
        self.encoded = Convolution2D(32, 3, 3, subsample=(3, 3), border_mode='valid', activation='relu', dim_ordering=self.dim_ordering)(self.inputs)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)
        self.encoded = Convolution2D(16, 2, 2, subsample=(2, 2), border_mode='valid', activation='relu', dim_ordering=self.dim_ordering)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)
        self.encoded = Convolution2D(1, 2, 2, subsample=(2, 2), border_mode='valid', activation='relu', dim_ordering=self.dim_ordering)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)

        # self.encoded = Flatten()(self.encoded)
        # self.encoded = Dense(7 * 7, activation='tanh', W_regularizer=l2(), name='encoded')(self.encoded)
        #
        # # Decoding layers
        # self.decoded = Reshape((1, 14, 14))(self.encoded)

        self.decoded = Convolution2D(1, 2, 2, border_mode='same', activation='relu', dim_ordering=self.dim_ordering)(self.encoded)
        self.decoded = UpSampling2D(size=(2, 2), dim_ordering=self.dim_ordering)(self.decoded)
        self.decoded = Convolution2D(16, 2, 2, border_mode='same', activation='relu', dim_ordering=self.dim_ordering)(self.decoded)
        self.decoded = UpSampling2D(size=(2, 2), dim_ordering=self.dim_ordering)(self.decoded)
        self.decoded = Convolution2D(32, 3, 3, border_mode='same', activation='relu', dim_ordering=self.dim_ordering)(self.decoded)
        self.decoded = UpSampling2D(size=(3, 3), dim_ordering=self.dim_ordering)(self.decoded)
        self.decoded = Convolution2D(self.input_shape[0], 2, 2, border_mode='same', activation='sigmoid', dim_ordering=self.dim_ordering)(self.decoded)
        """

        self.inputs = Input(shape=self.input_shape)
        self.encoded_input = Input(shape=(self.encoding_dim,))

        # Encoder: progressively narrower dense layers with dropout.
        self.encoded = Dense(self.input_shape[0] / 16)(self.inputs)
        self.encoded = LeakyReLU(alpha=0.01)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)

        self.encoded = Dense(self.input_shape[0] / 32)(self.encoded)
        self.encoded = LeakyReLU(alpha=0.01)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)

        self.encoded = Dense(self.input_shape[0] / 64)(self.encoded)
        self.encoded = LeakyReLU(alpha=0.01)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)

        self.encoded = Dense(self.input_shape[0] / 128)(self.encoded)
        self.encoded = LeakyReLU(alpha=0.01)(self.encoded)
        self.encoded = Dropout(self.dropout_prob)(self.encoded)

        # Bottleneck layer with L2 weight regularization.
        self.encoded = Dense(self.encoding_dim, name='encoded', W_regularizer=l2())(self.encoded)
        self.encoded = LeakyReLU(alpha=0.01)(self.encoded)

        # Decoder: mirror of the encoder, widening back to the input size.
        self.decoded = Dense(self.input_shape[0] / 128)(self.encoded)
        self.decoded = LeakyReLU(alpha=0.01)(self.decoded)

        self.decoded = Dense(self.input_shape[0] / 64)(self.decoded)
        self.decoded = LeakyReLU(alpha=0.01)(self.decoded)

        self.decoded = Dense(self.input_shape[0] / 32)(self.decoded)
        self.decoded = LeakyReLU(alpha=0.01)(self.decoded)

        self.decoded = Dense(self.input_shape[0] / 16)(self.decoded)
        self.decoded = LeakyReLU(alpha=0.01)(self.decoded)

        self.decoded = Dense(self.input_shape[0], activation='sigmoid')(self.decoded)

        # Models
        self.autoencoder = Model(input=self.inputs, output=self.decoded)
        self.encoder = Model(input=self.inputs, output=self.encoded)

        # Build decoder model by reusing the autoencoder's last six layers.
        if self.decoding_available:
            self.decoding_intermediate = self.autoencoder.layers[-6](self.encoded_input)
            self.decoding_intermediate = self.autoencoder.layers[-5](self.decoding_intermediate)
            self.decoding_intermediate = self.autoencoder.layers[-4](self.decoding_intermediate)
            self.decoding_intermediate = self.autoencoder.layers[-3](self.decoding_intermediate)
            self.decoding_intermediate = self.autoencoder.layers[-2](self.decoding_intermediate)
            self.decoding_output = self.autoencoder.layers[-1](self.decoding_intermediate)
            self.decoder = Model(input=self.encoded_input, output=self.decoding_output)

        # Optimization algorithm: prefer Adam, fall back to RMSprop when
        # Adam is not imported in this environment.
        try:
            self.optimizer = Adam()
        except NameError:
            self.optimizer = RMSprop()

        # Load the network from saved model
        if load_path is not None:
            self.load(load_path)

        self.autoencoder.compile(optimizer=self.optimizer, loss=self.contractive_loss, metrics=['accuracy'])

        # Save the architecture
        if self.logger is not None:
            with open(self.logger.path + 'architecture.json', 'w') as f:
                f.write(self.autoencoder.to_json())
                f.close()
Ejemplo n.º 33
0
    # Extract run options from the parsed arguments.  NOTE(review): the
    # enclosing function's `def` line is not visible in this view.
    root = parsed['output']
    nEpochs = int(parsed['epochs'])
    option = parsed['action']
    network_type = parsed['model']    
    noise = float(parsed['noise'])
    depth = int(parsed['depth'])
    activation = parsed['activation']
    lr = float(parsed['lr'])
    lr_multiplier = float(parsed['lr_multiplier'])
    batch_size = int(parsed['batchsize'])
    nkernels = int(parsed['kernels'])
    ninputs = int(parsed['ninputs'])

# Save parameters used
    with open("{0}_{1}_args.json".format(root, depth), 'w') as f:
        json.dump(parsed, f)

    out = deep_network(root, noise, option, depth, network_type, activation, lr, lr_multiplier, batch_size, nkernels, ninputs)

    # 'start' builds a fresh network; 'continue'/'predict' reload a saved one.
    if (option == 'start'):           
        out.define_network()        
        
    if (option == 'continue' or option == 'predict'):
        out.read_network()

    # Only 'start' and 'continue' actually train.
    if (option == 'start' or option == 'continue'):
        out.compile_network()
        out.train(nEpochs)

    # Release the Keras/TF session when done.
    ktf.clear_session()