Esempio n. 1
0
def init_tensorflow():
    """Create a TensorFlow session with on-demand GPU memory and register it with Keras."""
    # Suppress TensorFlow's INFO and WARNING console output.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    tf_config = tf.ConfigProto()
    # Grow GPU memory allocation as needed instead of reserving it all upfront.
    tf_config.gpu_options.allow_growth = True
    # Log which device (CPU/GPU) each op gets placed on.
    tf_config.log_device_placement = True
    set_session(tf.Session(config=tf_config))
Esempio n. 2
0
def configure_gpu(gpu):
    """Expose a single CUDA device to TensorFlow and start a growth-enabled session.

    Parameters
    ----------
    gpu : int or str
        Index of the CUDA device to expose via ``CUDA_VISIBLE_DEVICES``.

    Side effects: rebinds the module-level ``config`` and registers the new
    session with Keras via ``set_session``.
    """
    global config
    # Make CUDA device numbering follow PCI bus order (matches nvidia-smi).
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # Hide every GPU except the requested one from this process.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
    config = tf.ConfigProto()
    # Allocate GPU memory incrementally rather than claiming the whole device.
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
Esempio n. 3
0
import os
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import pylab as plt
import keras.backend as K
assert K.image_data_format(
) == 'channels_last', "Backend should be tensorflow and data_format channel_last"
from keras.backend import tf as ktf

# Start a TF session whose GPU memory footprint grows on demand (instead of
# pre-allocating the whole device) and make it the Keras backend session.
config = ktf.ConfigProto()
config.gpu_options.allow_growth = True
session = ktf.Session(config=config)
K.set_session(session)
from tqdm import tqdm


class Trainer(object):
    """Training driver pairing a dataset with a GAN model.

    NOTE(review): only the first statement of ``__init__`` is visible in this
    chunk — the body appears truncated; presumably the remaining keyword
    arguments are stored on ``self`` as well. Confirm against the full file.
    """
    def __init__(self,
                 dataset,
                 gan,
                 output_dir='output/generated_samples',
                 checkpoints_dir='output/checkpoints',
                 training_ratio=5,
                 display_ratio=1,
                 checkpoint_ratio=10,
                 start_epoch=0,
                 number_of_epochs=100,
                 batch_size=64,
                 **kwargs):
        self.dataset = dataset
Esempio n. 4
0
import keras
from keras import Input, Model, optimizers
from keras.activations import softmax
from keras.backend import batch_dot, tf
from keras.models import load_model
from keras.layers import Dense, Dropout, Bidirectional, GRU, Embedding, TimeDistributed, GlobalMaxPooling1D, \
    concatenate, Lambda, Dot, Permute, Concatenate, Multiply, Add
from keras.optimizers import RMSprop
from sklearn.metrics import f1_score
import pickle
import numpy as np
from keras import backend as K
import logging
# Open a TF session that allocates GPU memory on demand rather than
# reserving the entire device at startup.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
from utils.batch_gather import batch_gather
from keras.callbacks import Callback
logging.basicConfig(level=logging.INFO, filename='bert_hua_cea.log')
from utils.CyclicLR import CyclicLR
from utils.threshold import threshold_search, f1, count, load_pkl, save_file
from keras_bert import load_vocabulary, load_trained_model_from_checkpoint, Tokenizer

# Paths to the pretrained SciBERT checkpoint files.
# NOTE(review): `os` is used below but no `import os` appears in this
# snippet's import block — confirm it is imported elsewhere in the file.
pretrained_path = 'scibert_scivocab_uncased'
config_path = os.path.join(pretrained_path, 'bert_config.json')
checkpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')
vocab_path = os.path.join(pretrained_path, 'vocab.txt')

# Training-size constants; 768 matches BERT-base's hidden size — TODO confirm.
batch_size = 16
bert_out_shape = 768
max_sentence_length = 512