def yolo_eval(graph,
              yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    with graph.as_default():
        box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
        boxes = yolo_boxes_to_corners(graph, box_xy, box_wh)
        boxes, scores, classes = yolo_filter_boxes(graph,
                                                   boxes,
                                                   box_confidence,
                                                   box_class_probs,
                                                   threshold=score_threshold)

        # Scale boxes back to original image shape.
        height = image_shape[0]
        width = image_shape[1]
        image_dims = K.stack([height, width, height, width])
        image_dims = K.reshape(image_dims, [1, 4])
        boxes = boxes * image_dims

        # TODO: Something must be done about this ugly hack!
        max_boxes_tensor = K.variable(max_boxes, dtype='int32')
        K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
        nms_index = tf.image.non_max_suppression(boxes,
                                                 scores,
                                                 max_boxes_tensor,
                                                 iou_threshold=iou_threshold)
        boxes = K.gather(boxes, nms_index)
        scores = K.gather(scores, nms_index)
        classes = K.gather(classes, nms_index)

        return boxes, scores, classes
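
A minimal usage sketch for yolo_eval above, following the usual YAD2K-style pipeline. All names here are illustrative assumptions: yolo_model is a loaded Keras YOLO model, yolo_head converts its raw output into (box_xy, box_wh, box_confidence, box_class_probs), and image_data / image are the preprocessed array and the original PIL image.

# Hypothetical driver code, not part of the original source.
sess = K.get_session()
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(sess.graph,
                                   yolo_outputs,
                                   input_image_shape,
                                   score_threshold=0.6,
                                   iou_threshold=0.5)
out_boxes, out_scores, out_classes = sess.run(
    [boxes, scores, classes],
    feed_dict={yolo_model.input: image_data,
               input_image_shape: [image.size[1], image.size[0]],
               K.learning_phase(): 0})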
def deactive_train(json_str):
    data = json_str
    if data == 'deactive_train':
        ######################
        sess = get_session()
        clear_session()
        sess.close()
        sess = get_session()
        try:
            del my_detector
            del multipeople_classifier
            del multiperson_tracker
        except:
            pass
        print(gc.collect())  # if it's done something you should see a number being outputted

        # use the same config as you used to create the session
        config = tensorflow.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 1
        config.gpu_options.visible_device_list = "0"
        set_session(tensorflow.Session(config=config))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        ######################
        print(60 * 'k')
        repliesmess = "done_deactive"
        print(repliesmess)
        socketio.emit('done_deactive_train', data=repliesmess)
Example #3
File: KFold.py  Project: DavidSolanas/TFG
def reset_keras(model):
    """
    Resets keras session
    Parameters
    ----------
    model: Model to clear

    Returns
    -------

    """
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del model  # this is from global space - change this as you need
    except:
        pass

    # use the same config as you used to create the session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras
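
A short usage sketch for the reset_keras helper above, calling it between cross-validation folds so each fold starts from a clean session; build_model, X and y are hypothetical placeholders for whatever KFold.py actually defines.

# Hypothetical usage, assuming a build_model() factory and arrays X, y.
from sklearn.model_selection import KFold

for train_idx, val_idx in KFold(n_splits=5).split(X):
    model = build_model()
    model.fit(X[train_idx], y[train_idx],
              validation_data=(X[val_idx], y[val_idx]))
    reset_keras(model)  # release GPU memory before the next fold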
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del classifier  # this is from global space - change this as you need
    except:
        pass

    #print(gc.collect()) # if it's done something you should see a number being outputted
    ###################################
    # TensorFlow wizardry
    config = tensorflow.ConfigProto()

    # Don't pre-allocate memory; allocate as-needed
    config.gpu_options.allow_growth = True

    # Only allow a total of half the GPU memory to be allocated
    #config.gpu_options.per_process_gpu_memory_fraction = 0.5

    # Create a session with the above options specified.
    K.tensorflow_backend.set_session(tensorflow.Session(config=config))
    print("available gpu divice: {}".format(tensorflow.test.gpu_device_name()))

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
Example #5
def reset_keras():
    """Resets a Keras session and clears memory."""

    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del network  # this is from global space - change this as you need
    except:
        pass

    try:
        del network_model  # this is from global space - change this as you need
    except:
        pass

    print(gc.collect())  # if it's done something you should see a number being outputted

    # use the same config as you used to create the session
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory
    config.gpu_options.visible_device_list = '0'
    set_session(tf.Session(config=config))
Example #6
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tf.Session(config=config))
    print("Keras backend has been reset")
Example #7
def reset_keras(model):
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del model # this is from global space - change this as you need
    except:
        pass

    print(gc.collect()) # if it's done something you should see a number being outputted
def reset_keras_tf_session():
    """
    Clears the GPU memory and sets up the TF session so that it does not
    use the whole GPU
    """
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    config = tensorflow.ConfigProto()
    config.gpu_options.allow_growth = True
    set_session(tensorflow.Session(config=config))
Example #9
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    print(gc.collect())  # if it's done something you should see a number being outputted

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
Example #10
File: process.py  Project: iamysk/SurvCNN
 def reset_keras(self):
     print("Restarting Keras Session...")
     sess = get_session()
     clear_session()
     sess.close()
     sess = get_session()
     try:
         del model
     except:
         pass
     config = ConfigProto()
     config.gpu_options.allow_growth = True
     session = InteractiveSession(config=config)
     print('Done!')
def clearGPUMemory():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
    print("Garbage collected:", gc.collect())
    cuda.select_device(0)
    cuda.close()
Example #12
def reset_keras():
    import tensorflow
    from keras.backend.tensorflow_backend import set_session
    from keras.backend.tensorflow_backend import clear_session
    from keras.backend.tensorflow_backend import get_session

    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
 def cnn_test_color(self, trigger_type, gain, exp, classnamelist, cvv):
     # Build the model used by the neural network
     model_filename = 'cnn_model.json'
     weights_filename = 'cnn_weights.hdf5'
     old_session = KTF.get_session()
     #show_infrared_camera = None
     #show_infrared_camera = ShowInfraredCamera()
     with tf.Graph().as_default():
         session = tf.Session('')
         KTF.set_session(session)
         json_string = open(os.path.join(self.f_model,
                                         model_filename)).read()
         model = model_from_json(json_string)
         model.summary()
         adam = keras.optimizers.Adam(lr=self.learning_rate)
         model.compile(loss='categorical_crossentropy',
                       optimizer=adam,
                       metrics=['accuracy'])
         model.load_weights(os.path.join(self.f_model, weights_filename))
         cvv.realtime_identification_color(classnamelist, model,
                                           trigger_type, gain, exp,
                                           self.im_size_width,
                                           self.im_size_height, self.flip)
         cbks = []
     KTF.set_session(old_session)
 def __init__(self, **kwargs):
     self.__dict__.update(self._defaults)  # set up default values
     self.__dict__.update(kwargs)  # and update with user overrides
     self.class_names = self._get_class()
     self.anchors = self._get_anchors()
     self.sess = K.get_session()
     self.boxes, self.scores, self.classes = self.generate()
Example #15
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()

    # if it's done something you should see a number being outputted
    print("\nGarbage Collector: ", gc.collect())
Example #16
    def load_config(self):
        # read model config from environment
        self.device_str = os.environ.get("device_id", "/cpu:0")
        self.user_config = tf.ConfigProto(allow_soft_placement=False)
        gpu_mem_limit = float(os.environ.get("gpu_mem_limit", 0.3))
        self.user_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_limit
        self.user_config.gpu_options.allow_growth = True
        if os.environ.get("log_device_placement", False):
            self.user_config.log_device_placement = True
        print("device id %s, gpu memory limit: %f" %
              (self.device_str, gpu_mem_limit))

        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.device(self.device_str):
                self.session = tf.Session(config=self.user_config,
                                          graph=self.graph)
                KTF.set_session(self.session)
                self.model = KerasXception(weights=self.weight,
                                           input_shape=(self.input_shape[0],
                                                        self.input_shape[1],
                                                        self.input_shape[2]),
                                           pooling=self.pooling,
                                           include_top=False)
                self.graph = KTF.get_graph()
                self.session = KTF.get_session()
                self.model.trainable = False
                self.model.predict(
                    np.zeros((1, self.input_shape[0], self.input_shape[1], 3)))
                self.graph.as_default()
                self.session.as_default()
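
A sketch of how this environment-driven setup might be exercised, assuming the enclosing class has been instantiated as extractor with weight, input_shape and pooling already populated; the variable name and values are illustrative only.

# Hypothetical usage of load_config(), not part of the original source.
import os
import numpy as np

os.environ["device_id"] = "/gpu:0"      # or "/cpu:0"
os.environ["gpu_mem_limit"] = "0.5"     # cap TF at half of the GPU memory

extractor.load_config()                 # builds the graph, session and Xception model
with extractor.graph.as_default():
    with extractor.session.as_default():
        feats = extractor.model.predict(
            np.zeros((1,) + tuple(extractor.input_shape)))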
Example #17
def OCR(image_path, typeP, attribute, thresholding=160):
    """
        Entry point for OCR calls; the global models are preloaded via home/views.py.
        image_path: path to the input image (the image to recognize is a line-extraction result).
    """

    time11 = time.time()

    #global verify_global_model
    #global global_model
    global tax_model

    with K.get_session().graph.as_default():
        if typeP == 'normal' and attribute == 'verifyCode':
            print('model:    3_global_model')
            #out, _ = veryVat.predict(image_path, verify_global_model)
            out = tax.predict(image_path, tax_model)
        elif typeP == 'train':
            out = predict.predict_single(image_path)
        else:
            print('model:    global_model')
            #out, _ = ocrVat.predict(image_path, global_model)
            out = tax.predict(image_path, tax_model)

    time12 = time.time()
    print(attribute + ' recognition time:   ' + str(time12 - time11))

    return out
Example #18
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf.histogram_summary(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        tf.image_summary(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer.name),
                                         layer.output)
Example #19
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and not self.merged:
            mod_type = self.model.get_config()['name']
            if mod_type == 'Sequential':
                layers = {l.get_config()['name']: l for l in self.model.layers}
            elif mod_type == 'Graph':
                layers = self.model.nodes
            else:
                raise Exception('Unrecognized model:',
                                self.model.get_config()['name'])
            for l in layers:
                cur_layer = layers[l]
                if hasattr(cur_layer, 'W'):
                    tf.histogram_summary('{}_W'.format(l), cur_layer.W)
                if hasattr(cur_layer, 'b'):
                    tf.histogram_summary('{}_b'.format(l), cur_layer.b)
                if hasattr(cur_layer, 'get_output'):
                    tf.histogram_summary('{}_out'.format(l),
                                         cur_layer.get_output())
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(self.log_dir,
                                             self.sess.graph_def)
Example #20
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF
        from pkg_resources import parse_version

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, 'W'):
                    tf.histogram_summary('{}_W'.format(layer), layer.W)
                if hasattr(layer, 'b'):
                    tf.histogram_summary('{}_b'.format(layer), layer.b)
                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer), layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
Example #21
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and not self.merged:
            mod_type = self.model.get_config()['name']
            if mod_type == 'Sequential':
                layers = {l.get_config()['name']: l for l in self.model.layers}
            elif mod_type == 'Graph':
                layers = self.model.nodes
            else:
                raise Exception('Unrecognized model:',
                                self.model.get_config()['name'])
            for l in layers:
                cur_layer = layers[l]
                if hasattr(cur_layer, 'W'):
                    tf.histogram_summary('{}_W'.format(l), cur_layer.W)
                if hasattr(cur_layer, 'b'):
                    tf.histogram_summary('{}_b'.format(l), cur_layer.b)
                if hasattr(cur_layer, 'get_output'):
                    tf.histogram_summary('{}_out'.format(l),
                                         cur_layer.get_output())
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(self.log_dir,
                                             self.sess.graph_def)
Example #22
def reset_keras():
    """
    Releases keras session
    """
    sess = get_session()
    clear_session()
    sess.close()
Example #23
def train():
    X = np.load('./data/train_X.npy')
    y = np.load('./data/train_y.npy')

    train_datagen = ImageDataGenerator(rescale=1.0 / 255.)
    validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
    train_generator = train_datagen.flow(X, y, batch_size=300)
    validation_generator = validation_datagen.flow(X, y)

    nb_epoch = 2000
    nb_train_samples = N
    nb_validation_samples = 600
    old_session = KTF.get_session()
    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)
        model = Sequential()
        with tf.name_scope("inference") as scope:
            model = set_model(model)
        model.summary()
        fpath = './model/weights.hdf5'
        tb_cb = TensorBoard(log_dir="./tensorlog", histogram_freq=1)
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=False,
                                mode='auto',
                                save_weights_only=True)
        history = model.fit_generator(train_generator,samples_per_epoch=nb_train_samples, \
                                                        nb_epoch=nb_epoch, validation_data=validation_generator, \
                                                        nb_val_samples=nb_validation_samples,\
                                                        callbacks=[cp_cb, tb_cb])
Example #24
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, 'W'):
                    tf.histogram_summary('{}_W'.format(layer), layer.W)
                if hasattr(layer, 'b'):
                    tf.histogram_summary('{}_b'.format(layer), layer.b)
                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
Example #25
    def __init__(self, yolo, **cfgs):
        self.yolo = yolo
        self.model_path = cfgs['model_path']
        self.anchors_path = cfgs['anchors_path']
        self.classes_path = cfgs['classes_path']

        self.score_threshold = cfgs['score_threshold']
        self.iou_threshold = cfgs['iou_threshold']

        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.num_anchors = len(self.anchors)
        self.num_classes = len(self.class_names)
        #self.colors = self._colors()

        if cfgs['cpu']:
            config = tf.ConfigProto(device_count={"GPU": 0})
            K.set_session(tf.Session(config=config))

        self.sess = K.get_session()

        self.model_image_size = (416, 416)  # fixed size or (None, None), hw
        self.input_image_shape = K.placeholder(shape=(2, ))

        self.yolo_model = self._load_weights()
        self.boxes, self.scores, self.classes = self.yolo.yolo_eval(
            self.yolo_model.output,
            self.anchors,
            self.num_classes,
            self.input_image_shape,
            score_threshold=self.score_threshold,
            iou_threshold=self.iou_threshold)
Example #26
    def __init__(self,
                 enable_actions,
                 environment_name,
                 graves=False,
                 ddqn=False):
        # parameters
        self.name = os.path.splitext(os.path.basename(__file__))[0]
        self.environment_name = environment_name
        self.enable_actions = enable_actions
        self.n_action = len(self.enable_actions)
        self.minibatch_size = 32
        self.replay_memory_size = 5000
        self.learning_rate = 0.00025
        self.discount_factor = 0.9
        self.use_graves = graves
        self.use_ddqn = ddqn
        self.exploration = INITIAL_EXPLORATION
        self.exploration_step = (INITIAL_EXPLORATION -
                                 FINAL_EXPLORATION) / EXPLORATION_STEPS
        self.model_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "models")
        self.model_name = "{}.ckpt".format(self.environment_name)

        self.old_session = KTF.get_session()
        self.session = tf.Session("")
        KTF.set_session(self.session)

        # replay memory
        self.D = deque(maxlen=self.replay_memory_size)

        # variables
        self.current_loss = 0.0
Example #27
def saveTF1p6Model(protobuffer_name):
    frozen_graph = freeze_session(
        K.get_session(), output_names=[out.op.name for out in model.outputs])
    tf.train.write_graph(frozen_graph,
                         os.getcwd(),
                         protobuffer_name,
                         as_text=False)
    print("Saved frozen model in ", protobuffer_name)
Example #28
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()

    # use the same config as you used to create the session
    config = tf.ConfigProto() #allow_soft_placement=True, log_device_placement=True)
    set_session(tf.Session(config=config))
Example #29
def reset_keras(classifier):
    sess = get_session()

    clear_session()
    sess.close()
    sess = get_session()

    try:
        del classifier # this is from global space - change this as you need
    except:
        pass


    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
Example #30
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del classifier  # this is from global space - change this as you need
    except:
        pass

    # print(gc.collect()) # if it's done something you should see a number being outputted

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
Example #31
File: utils.py  Project: saswat0/PSO-net
def reset_keras():
    sess = get_session()
    clear_session()
    sess.close()
    sess = get_session()

    try:
        del model
    except:
        pass

    _ = gc.collect()

    # use the same config as you used to create the session
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1
    config.gpu_options.visible_device_list = "0"
    set_session(tensorflow.Session(config=config))
Example #32
    def reset_keras(self):

        torch.cuda.empty_cache()

        sess = get_session()
        clear_session()
        sess.close()
        sess = get_session()

        try:
            del classifier
        except:
            pass

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 1.0
        config.gpu_options.visible_device_list = "0"
        set_session(tf.Session(config=config))
Example #33
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    tf.histogram_summary(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        tf.image_summary(weight.name, w_img)

                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer.name),
                                         layer.output)
        if parse_version(tf.__version__) >= parse_version('0.12.0'):
            self.merged = tf.summary.merge_all()
        else:
            self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.12.0'):
                self.writer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)
            elif parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            if parse_version(tf.__version__) >= parse_version('0.12.0'):
                self.writer = tf.summary.FileWriter(self.log_dir)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir)
Example #34
File: began.py  Project: aasensio/DNHazel
    def train(self, ep=2000, noise_level=.01):

        gan_feed = self.gan()
        sess = K.get_session()

        np.random.shuffle(self.x_train)
        shuffled_cifar = self.x_train
        length = len(shuffled_cifar)

        for i in range(ep):
            j = i % int(length / self.batch_size)
            minibatch = shuffled_cifar[j*self.batch_size:(j+1)*self.batch_size]

            z_input = np.random.normal(loc=0.,scale=1.,size=(self.batch_size,self.zed))

            # train for one step
            losses = gan_feed(sess,minibatch,z_input)
            if (i % 10 == 0):
                print('{0} - dloss:{1:6.4f} gloss:{2:6.4f}'.format(i,losses[0],losses[1]))

            
        self.show()
    def __call__(self, inputs):
        if not isinstance(inputs, (list, tuple)):
            raise TypeError('`inputs` should be a list or tuple.')
        feed_dict = {}
        for tensor, value in zip(self.inputs, inputs):
            if is_sparse(tensor):
                sparse_coo = value.tocoo()
                indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                                          np.expand_dims(sparse_coo.col, 1)),
                                         1)
                value = (indices, sparse_coo.data, sparse_coo.shape)
            feed_dict[tensor] = value
        session = get_session()
        enqueue_ops = self._enqueue_ops
        neops = len(enqueue_ops)
        updated = session.run(enqueue_ops + self.outputs + [self.updates_op],
                              feed_dict=feed_dict,
                              **self.session_kwargs)
        nouts = len(self.outputs)

        # return updated[:len(self.outputs)]
        return updated[neops:nouts + neops]
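
To make the sparse branch above concrete, a small standalone sketch (assuming scipy is available) of the (indices, values, shape) triple that gets fed for a sparse input tensor.

# Illustration only: how a scipy sparse matrix is unpacked for the feed_dict.
import numpy as np
from scipy import sparse

m = sparse.csr_matrix([[0., 2.], [3., 0.]])
coo = m.tocoo()
indices = np.concatenate((np.expand_dims(coo.row, 1),
                          np.expand_dims(coo.col, 1)), 1)
value = (indices, coo.data, coo.shape)
# indices -> [[0, 1], [1, 0]], data -> [2., 3.], shape -> (2, 2)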
Example #36
    def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF
        from pkg_resources import parse_version

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, "W"):
                    tf.histogram_summary("{}_W".format(layer), layer.W)
                if hasattr(layer, "b"):
                    tf.histogram_summary("{}_b".format(layer), layer.b)
                if hasattr(layer, "output"):
                    tf.histogram_summary("{}_out".format(layer), layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version("0.8.0"):
                self.writer = tf.train.SummaryWriter(self.log_dir, self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir, self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir)
Example #37
def test_TensorBoard():
    import shutil
    import tensorflow as tf
    import keras.backend.tensorflow_backend as KTF
    old_session = KTF.get_session()
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    def data_generator_graph(train):
        while 1:
            if train:
                yield {'X_vars': X_train, 'output': y_train}
            else:
                yield {'X_vars': X_test, 'output': y_test}

    # case 1 Sequential

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])

        tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
        cbks = [tsb]

        # fit with validation data
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

        # fit with validation data and accuracy
        model.fit(X_train, y_train, batch_size=batch_size,
                  validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

        # fit generator with validation data
        model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                            validation_data=(X_test, y_test),
                            callbacks=cbks)

        # fit generator without validation data
        model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                            callbacks=cbks)

        # fit generator with validation data and accuracy
        model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                            validation_data=(X_test, y_test),
                            callbacks=cbks)

        # fit generator without validation data and accuracy
        model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
                            callbacks=cbks)

        assert os.path.exists(filepath)
        shutil.rmtree(filepath)

    # case 2 Graph

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        model = Graph()
        model.add_input(name='X_vars', input_shape=(input_dim,))

        model.add_node(Dense(nb_hidden, activation="sigmoid"),
                       name='Dense1', input='X_vars')
        model.add_node(Dense(nb_class, activation="softmax"),
                       name='last_dense',
                       input='Dense1')
        model.add_output(name='output', input='last_dense')
        model.compile(optimizer='sgd', loss={'output': 'mse'})

        tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
        cbks = [tsb]

        # fit with validation
        model.fit({'X_vars': X_train, 'output': y_train},
                  batch_size=batch_size,
                  validation_data={'X_vars': X_test, 'output': y_test},
                  callbacks=cbks, nb_epoch=2)

        # fit wo validation
        model.fit({'X_vars': X_train, 'output': y_train},
                  batch_size=batch_size,
                  callbacks=cbks, nb_epoch=2)

        # fit generator with validation
        model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
                            validation_data={'X_vars': X_test, 'output': y_test},
                            callbacks=cbks)

        # fit generator wo validation
        model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
                            callbacks=cbks)

        assert os.path.exists(filepath)
        shutil.rmtree(filepath)

    KTF.set_session(old_session)