def __init__(self):
    """Initialize an empty model holder and bind a GPU-friendly TF session to Keras."""
    self.model = None
    self.optimizer = "adam"
    # Capture the default graph so later calls can re-enter it explicitly.
    self.graph = tf.get_default_graph()
    # Allocate GPU memory on demand instead of reserving the whole card.
    tf_config = ConfigProto()
    tf_config.gpu_options.allow_growth = True
    self.session = Session(config=tf_config)
    keras.backend.set_session(self.session)
Esempio n. 2
0
def define_video_config():
    """Configure GPU usage and open an interactive TensorFlow session.

    Enables on-demand GPU memory growth so TensorFlow does not reserve the
    whole device up front.

    Returns:
        The created ``InteractiveSession`` so the caller can close or reuse
        it (previously the session was created and then discarded).
    """
    config_proto = ConfigProto()
    # Alternative: cap memory instead of growing on demand.
    # config_proto.gpu_options.per_process_gpu_memory_fraction = 0.8
    config_proto.gpu_options.allow_growth = True
    session = InteractiveSession(config=config_proto)
    return session
Esempio n. 3
0
def prepare_environment(seed=1):
    """Seed the RNGs for reproducibility and open a growth-limited TF session.

    Args:
        seed: Seed applied to both NumPy and the stdlib ``random`` module.
            Defaults to 1, matching the previous hard-coded behavior.

    Returns:
        The created ``InteractiveSession`` (previously discarded), so the
        caller can close it when done.
    """
    np.random.seed(seed)
    random.seed(seed)

    from tensorflow import ConfigProto
    from tensorflow import InteractiveSession
    config = ConfigProto()
    # Grow GPU memory on demand rather than pre-allocating the whole device.
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    return session
    def __init__(self, model_dir, img_list):
        """Open a GPU session and load the checkpointed model used to score *img_list*."""
        # Allow the session up to the whole GPU, allocating incrementally.
        gpu_opts = GPUOptions(per_process_gpu_memory_fraction = 1, allow_growth = True)
        sess = Session(config = ConfigProto(gpu_options = gpu_opts))

        self.model_dir = os.path.join(C.check_point_path, model_dir)

        self.img_list = img_list
        self.list_size = len(self.img_list)
        self.batch_size = 64

        # 'resize_and_normalize' is a custom object baked into the saved model;
        # Keras needs it registered to deserialize the checkpoint.
        self.model = load_model(self.model_dir, custom_objects = {'resize_and_normalize': resize_and_normalize})
Esempio n. 5
0
def _dojob(ready, e, queue):
    # Worker loop: load two GRU models (per-IP and per-MAC) into separate
    # graphs/sessions, signal readiness, then drain `queue` aggregating flow
    # features and kick off a detection run every `polling_interval` seconds
    # until the stop event `e` is set. (Python 2 syntax.)
    prctl.set_name('AI detector - do job')
    global session1, session2, ip_model, mac_model
    # Each model gets its own graph + session so their variables don't collide.
    ip_graph = Graph()
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    with ip_graph.as_default():
        session1 = Session(config=config)
        with session1.as_default():
            ip_model = K.models.load_model(
                'gru_ip_4tuple.hdf5', custom_objects={'attention': attention})
            # Pre-build the predict function so it's thread-safe later.
            ip_model._make_predict_function()

    mac_graph = Graph()
    with mac_graph.as_default():
        session2 = Session(config=config)
        with session2.as_default():
            mac_model = K.models.load_model(
                'gru_mac_4tuple.hdf5', custom_objects={'attention': attention})
            mac_model._make_predict_function()
    # Tell the parent that both models are loaded and we can accept work.
    ready.set()
    print 'set ready'
    last = time.time()
    global ignore_packet
    while e.is_set() == False:
        if queue.empty() == False:
            obj = queue.get()
            # Skip packets explicitly ignored up to a cutoff value.
            # NOTE(review): obj appears to be a tuple where (obj[0], obj[1])
            # identifies the flow and obj[3] is a timestamp/sequence — confirm
            # against the producer side.
            if (obj[0], obj[1]) in ignore_packet:
                if obj[3] <= ignore_packet[(obj[0], obj[1])]:
                    continue
            feature_extract((obj[2], obj[3]))
        if time.time() - last >= polling_interval:
            print queue.qsize()
            global flow_statics, src_addr_list, memory_data

            # Aggregate features collected since the last poll, keep a sliding
            # window in memory_data (drop oldest, append newest).
            result = calculate_feature(flow_statics)
            memory_data.pop(0)
            memory_data.append(result)
            # Run the experiment on a thread but join immediately — effectively
            # synchronous; presumably kept as a thread for graph/session scoping.
            t_run_exp = threading.Thread(target=_run_exp,
                                         args=(
                                             result,
                                             src_addr_list,
                                             memory_data,
                                         ))
            t_run_exp.start()
            t_run_exp.join()
            # Reset per-interval accumulators for the next window.
            flow_statics = {}
            src_addr_list = {}
            last = time.time()
    # Stop requested: release Keras/TF resources before exiting.
    K.backend.clear_session()
    del ip_model
    del mac_model
    def config_session(self):
        """Return a TF session pinned to GPU:0 with bounded CPU parallelism."""
        session_config = ConfigProto(
            inter_op_parallelism_threads=6,
            intra_op_parallelism_threads=6,
            allow_soft_placement=True,
        )
        # Grow GPU memory on demand, capped at 90% of the device.
        session_config.gpu_options.allow_growth = True
        session_config.gpu_options.per_process_gpu_memory_fraction = 0.9

        with tf.device('/job:localhost/replica:0/task:0/device:GPU:0'):
            sess = Session(config=session_config)
        return sess
    def load(self, checkpoint_path, model_name='tacotron'):
        """Build the synthesis graph and restore weights from *checkpoint_path*."""
        print('Constructing model: %s' % model_name)
        # Batch size fixed to 1; sequence length left dynamic (None).
        inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
        input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
        with tf.variable_scope('model') as scope:
            self.model = create_model(model_name, hparams)
            self.model.initialize(inputs, input_lengths)
            # Convert the first predicted linear spectrogram back to a waveform tensor.
            self.wav_output = audio.inv_spectrogram_tensorflow(
                self.model.linear_outputs[0])

        print('Loading checkpoint: %s' % checkpoint_path)
        config = ConfigProto()
        # Allocate GPU memory on demand instead of reserving the whole card.
        config.gpu_options.allow_growth = True
        self.session = tf.Session(config=config)
        # Initialize all variables first, then overwrite them from the checkpoint.
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(self.session, checkpoint_path)
Esempio n. 8
0
def load_model(architecture_file, mtype='base'):
    """Restore a model of type *mtype* from the directory holding *architecture_file*."""
    import models
    from tensorflow import GPUOptions, ConfigProto, Session

    # The checkpoint directory is the folder containing the architecture file.
    checkdir = '/'.join(architecture_file.split('/')[:-1]) + '/'

    print('\n' * 2, '-' * _repeat_, '\n:: Open Session\n', '-' * _repeat_,
          '\n')
    # Cap this process at half of the GPU's memory.
    gpu_opts = GPUOptions(per_process_gpu_memory_fraction=0.5)
    sess = Session(config=ConfigProto(allow_soft_placement=True,
                                      gpu_options=gpu_opts))
    print('\n', '-' * _repeat_)

    # Look up the model class by type name, then rebuild it from disk.
    model = models.__dict__[mtype].__MODEL__()
    bundle = {'model': model, 'architecture': architecture_file, 'dir': checkdir}
    models.base.__MODEL__.load_architecture(bundle)
    model.set_session(sess)
    model.build(training=False)
    model.load(bundle)
    return model