Code example #1
File: face_feature_demo.py Project: johndpope/BA
    def detect(self, image):
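        # Reserve GPU 0 before building the graph; "cuda" here is presumably numba.cuda,
        # and the device is released again via cuda.close() at the end of this method.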
        cuda.select_device(0)

        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
        ROOT_DIR = "/home/bernihoh/Bachelor/SMS/MaskRCNN/samples/SMSNetworks/face_feature_detection/"
        MODEL_DIR = os.path.join(ROOT_DIR, "logsFaceFeatureDetection")
        COCO_MODEL_PATH = "/home/bernihoh/Bachelor/SMS/MaskRCNN/samples/SMSNetworks/face_feature_detection/mask_rcnn_face_feature_detection_0029.h5"
        config = InferenceConfig()
        config.display()

        # Create model object in inference mode.
        model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

        # Load the trained face feature detection weights
        model.load_weights(COCO_MODEL_PATH, by_name=True)

        class_names = ["bg", "iris_l", "inner_eye_l", "outer_eye_l", "eye_brow_l", "cheek_l", "iris_r",
                       "inner_eye_r", "outer_eye_r", "eye_brow_r", "cheek_r", "nose_tip", "nose", "mouth",
                       "chin", "face", "head", "distortion"]

        results = model.detect([image], verbose=1)
        r = results[0]
        session.close()
        cuda.close()
        return r
Code example #2
 def train(self,
           sess: tf.InteractiveSession,
           train_x: np.ndarray,
           train_y: np.ndarray,
           valid_x: np.ndarray,
           valid_y: np.ndarray,
           epoch=20,
           batch_size=128,
           valid_batch_size=50,
           step=200,
           verbose=True):
     print(get_now(), 'start training')
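     # Order the sample indices by descending input length so that
     # similarly sized samples land in the same batch.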
     train_idx = sorted(range(len(train_x)),
                        key=lambda x: len(train_x[x]),
                        reverse=True)
     valid_idx = sorted(range(len(valid_x)),
                        key=lambda x: len(valid_x[x]),
                        reverse=True)
     sess.run(tf.global_variables_initializer())
     best_aupr = best_auc = current = 0
     for idx_epoch in range(epoch):
         for i in range(0, len(train_idx), batch_size):
             batch_idx = train_idx[i:i + batch_size]
             train_loss = self.train_step(sess, train_x[batch_idx],
                                          train_y[batch_idx])
             current += 1
             if current % step == 0:
                 print(get_now())
                 valid_loss = 0
                 valid_res = np.empty([len(valid_idx), 1], dtype=int)
                 for j in range(0, len(valid_idx), valid_batch_size):
                     valid_batch_idx = valid_idx[j:j + valid_batch_size]
                     loss, output = self.valid_step(
                         sess, valid_x[valid_batch_idx],
                         valid_y[valid_batch_idx])
                     valid_res[valid_batch_idx] = output
                     valid_loss += loss * len(valid_batch_idx)
                 valid_loss /= len(valid_idx)
                 auc = get_auc(valid_y, valid_res)
                 aupr = get_aupr(valid_y, valid_res)
                 if aupr > best_aupr:
                     best_aupr = aupr
                     best_auc = auc
                     self.saver.save(sess, self.model_path)
                 if verbose:
                     print(get_now(), current, current * batch_size,
                           idx_epoch, i + batch_size,
                           'train loss:', round(train_loss, 5),
                           'valid loss:', round(valid_loss, 5),
                           'AUC:', round(auc, 5), 'AUPR:', round(aupr, 5))
     print(get_now(), 'Summary ', 'Best AUC:', best_auc, 'Best AUPR:',
           best_aupr)
Code example #3
File: gradient.py Project: minfeixia/master-code
def get_gradient(network: keras.models.Sequential, X: numpy.ndarray, session: tensorflow.InteractiveSession) -> numpy.ndarray:
    """
    Calculates the gradient for a given network/input
    :param network: Network
    :type network: keras.Sequential
    :param X: Input to network.
    :type X: numpy.ndarray
    :param session: Session object
    :type session: tensorflow.InteractiveSession
    :returns: Gradient as numpy.ndarray
    """
    x = tensorflow.placeholder(tensorflow.float32, X.shape)

    prediction = network(x)

    y_shape = tensorflow.shape(prediction)
    classes = y_shape[1]
    index = tensorflow.argmax(prediction, axis=1)
    target = tensorflow.one_hot(index, classes, on_value=1.0, off_value=0.0)

    logits, = prediction.op.inputs
    loss = tensorflow.nn.softmax_cross_entropy_with_logits(labels=target, logits=logits)
    gradient, = tensorflow.gradients(loss, x)

    return session.run(gradient, feed_dict={x: X, keras.backend.learning_phase(): 0})
Code example #4
def matrix_to_text(token, x):
    print('Converting to text vector...')
    reverse_word_dict = dict(map(reversed, token.word_index.items()))
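    # An InteractiveSession installs itself as the default session,
    # so the .eval() call below needs no explicit session argument.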
    InteractiveSession()
    seqs_to_words = lambda y: list(
        map(reverse_word_dict.get,
            argmax(y, axis=-1).eval()))
    return seqs_to_words(x)
Code example #5
def create_face_net_dict_files_for_train_val(picture_path, pic_file):
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    pic = skimage.io.imread(picture_path + pic_file)
    ffn = FFN(pic)
    ffn_results = ffn.detect()
    session.close()
    if ffn_results == "redo":
        print("redo")
        return "Failure"
    else:
        with open(
                "/home/bernihoh/Bachelor/SMS/modification/netcompare/services/face_net/face_net_pkl_val/"
                + pic_file + ".pkl", "wb") as output:
            pkl.dump(ffn_results["face_score_input"], output)
        return "Success"
Code example #6
def define_video_config():
    """
    Определяются параметры использования видеокарты
    """
    # Определяются параметры использования видеокарты
    config_proto = ConfigProto()
    # config_proto.gpu_options.per_process_gpu_memory_fraction = 0.8
    config_proto.gpu_options.allow_growth = True
    session = InteractiveSession(config=config_proto)
Code example #7
import random

import numpy as np


def prepare_environment():
    np.random.seed(1)
    random.seed(1)

    from tensorflow import ConfigProto
    from tensorflow import InteractiveSession
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
Code example #8
    def __init__(self, image_size=[28, 28], char_number=10, channel=1):
        self.image_size = image_size
        self.char_number = char_number
        self.channel = channel
        # self.learning_rate = learning_rate
        # self.eval_every = eval_every
        # self.epochs = epochs
        # self.evaluation_size = evaluation_size
        # self.batch_size = batch_size
        # self.optimizer = optimizer

        self.inputs = placeholder(float32,
                                  shape=[None, image_size[0] * image_size[1]],
                                  name="inputs")

        self.model = reshape(self.inputs,
                             [-1, image_size[0], image_size[1], channel])
        self.labels = placeholder(float32, shape=[None, 10], name="labels")
        self.sess = InteractiveSession()
Code example #9
def sequences_to_text(token, x):
    print('Converting to text...')
    reverse_word_dict = dict(map(reversed, token.word_index.items()))
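    # The InteractiveSession becomes the default session, so the
    # .eval() inside from_categorical works without one being passed.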
    InteractiveSession()
    from_categorical = lambda y: argmax(y, axis=-1).eval()
    seqs_to_words = lambda y: list(
        map(reverse_word_dict.get, from_categorical(y)))
    words_to_sentence = lambda y: ' '.join(filter(None, y))
    word_matrix = list(map(seqs_to_words, x))
    sentence_list = list(map(words_to_sentence, word_matrix))
    return '\n'.join(sentence_list)
Code example #10
 def predict_step(self, sess: tf.InteractiveSession, data_x: np.ndarray):
     batch_num, res = len(data_x), []
     for i in range(batch_num):
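         # Each sample bundles the three model inputs data_x[i][0..2];
         # the training flag is switched off for prediction.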
         feed_dict = {
             self.data_smi: data_x[i][0],
             self.data_adj: data_x[i][1],
             self.data_seq: data_x[i][2],
             self.training: False
         }
         output = sess.run(self.output, feed_dict=feed_dict)
         res.append(output[0])
     return res
Code example #11
    def detect(self):
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
        self.MODEL_DIR = self.model_dir()
        self.NETWORK_PATH = self.network_path()
        self.InferenceConfig = self.inference_config()
        self.InferenceConfig.display()
        self.model = modellib.MaskRCNN(mode="inference",
                                       model_dir=self.MODEL_DIR,
                                       config=self.InferenceConfig)
        self.model.load_weights(self.NETWORK_PATH, by_name=True)
        results = self.model.detect([self.image], verbose=1)
        """r looks like this: r['rois'], r['masks'], r['class_ids'], r['scores']"""
        r = results[0]

        class_names = self.class_names()

        visualize.display_instances(self.image, r['rois'], r['masks'],
                                    r['class_ids'], class_names, r['scores'])
        session.close()
        ret = self.further_image_manipulation(r)
        return ret
Code example #12
 def train_step(self, sess: tf.InteractiveSession, train_x: np.ndarray,
                train_y: np.ndarray):
     batch_loss, batch_num = 0, len(train_x)
     for i in range(batch_num):
         batch_x, batch_y = train_x[i], train_y[i]
         feed_dict = {
             self.data_smi: batch_x[0],
             self.data_adj: batch_x[1],
             self.data_seq: batch_x[2],
             self.data_y: np.array(batch_y).reshape([-1, 1]),
             self.training: True
         }
         _, loss = sess.run([self.optimizer, self.loss],
                            feed_dict=feed_dict)
         batch_loss += loss / batch_num
     return batch_loss
Code example #13
 def valid_step(self, sess: tf.InteractiveSession, valid_x: np.ndarray,
                valid_y: np.ndarray):
     batch_loss, batch_num, res = 0, len(valid_x), []
     for i in range(batch_num):
         batch_x, batch_y = valid_x[i], valid_y[i]
         feed_dict = {
             self.data_smi: batch_x[0],
             self.data_adj: batch_x[1],
             self.data_seq: batch_x[2],
             self.data_y: np.array(batch_y).reshape([-1, 1]),
             self.training: False
         }
         loss, output = sess.run([self.loss, self.output],
                                 feed_dict=feed_dict)
         batch_loss += loss / batch_num
         res.append(output[0])
     return batch_loss, res
Code example #14
File: owndemo3.py Project: johndpope/BA
    def detect(self, image_path, background_color):
        cuda.select_device(0)

        config = ConfigProto()
        config.gpu_options.allow_growth = True
        session = InteractiveSession(config=config)
        ROOT_DIR = os.path.abspath("/")
        MODEL_DIR = os.path.join(ROOT_DIR, "logs")
        sys.path.append(ROOT_DIR)  # To find local version of the library
        sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version
        COCO_MODEL_PATH = "/home/bernihoh/Bachelor/MaskRCNN/mask_rcnn_coco.h5"
        if not os.path.exists(COCO_MODEL_PATH):
            utils.download_trained_weights(COCO_MODEL_PATH)
        config = InferenceConfig()
        config.display()
        model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
        model.load_weights(COCO_MODEL_PATH, by_name=True)

        COCO_DIR = "/home/bernihoh/Bachelor/MaskRCNN/samples/coco"  # changed: line inserted
        dataset = coco.CocoDataset()
        dataset.load_coco(COCO_DIR, "train")
        dataset.prepare()

        class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
                       'bus', 'train', 'truck', 'boat', 'traffic light',
                       'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
                       'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                       'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                       'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                       'kite', 'baseball bat', 'baseball glove', 'skateboard',
                       'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                       'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                       'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                       'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
                       'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                       'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
                       'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
                       'teddy bear', 'hair drier', 'toothbrush']
        image = skimage.io.imread(image_path)
        print(dataset.class_names)

        plt.imshow(image)
        # Run detection
        results = model.detect([image], verbose=1)

        # Visualize results
        r = results[0]
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
        used_class = r["class_ids"]
        print(used_class)
        mask = r["masks"]
        mask = mask.astype(np.ubyte)
        # maskimg = mask[:, :, 1] ^ mask[:, :, 1]
        maskimg = np.zeros((image.shape[0], image.shape[1]))
        maskimg = maskimg.astype(np.ubyte)
        background_mask = np.full((image.shape[0], image.shape[1]), background_color, dtype=np.uint8)
        skimage.io.imshow(background_mask)
        plt.show()
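        # Compose a single label image: every instance mask contributes its
        # (class id - 1) via np.maximum, and the instance region is subtracted
        # from the uniform background mask.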
        for i in range(mask.shape[2]):
            # skimage.io.imshow(mask[:, :, i])
            # plt.show()
            # maskimg = maskimg | mask[:, :, i]
            a = used_class[i] - 1
            if used_class[i] - 1 < 0:
                a = 0
            background_mask = background_mask - mask[:, :, i] * (a + 127)
            maskimg = np.maximum(maskimg, mask[:, :, i] * a)
        skimage.io.imshow(background_mask)
        plt.show()
        maskimg = np.maximum(maskimg, background_mask)
        # maskimg[maskimg == 0] = 124
        # maskimg = skimage.exposure.rescale_intensity(maskimg)
        skimage.io.imshow(maskimg)
        plt.show()
        # skimage.io.imsave("/home/bernihoh/Bachelor/MaskRCNN/ownimages/mask138-1.jpg", maskimg)
        session.close()
        cuda.close()
        return maskimg
Code example #15
from dataset.dataset_factory import DataSetFactory, CocoSetFactory
# Add the MaskRCNN library to the module search path
add_mask_rcnn(sys.argv[0])
from mask_rcnn.mrcnn.model import MaskRCNN
# import imgaug

# START_MODEL_PATH = 'start_mask/mask_rcnn_start.h5'
START_MODEL_PATH = 'mask_rcnn_detect_cfg_0005.h5'
COCO_PATH = '/home/ids/coco'
CLASS_IDS = [1, 2]

# Configure the GPU usage parameters
config_proto = ConfigProto()
# config_proto.gpu_options.per_process_gpu_memory_fraction = 0.8
config_proto.gpu_options.allow_growth = True
session = InteractiveSession(config=config_proto)

# Prepare the training and test datasets
# train_set = DataSetFactory.new_instance('data/img')
# test_set = DataSetFactory.new_instance('data/val')
train_set = CocoSetFactory.new_instance(COCO_PATH, 'train', 2017, CLASS_IDS)
test_set = CocoSetFactory.new_instance(COCO_PATH, 'val', 2017, CLASS_IDS)

# Define the configuration
config = DetectConfig()
config.display()
# Define the model
model = MaskRCNN(mode='training', model_dir='./', config=config)
# Load the starting model weights
model.load_weights(START_MODEL_PATH,
                   by_name=True)
Code example #16
File: vqa.py Project: erosvall/tbir
def main(argv=None):
    # EXAMPLES
    argparser = argparse.ArgumentParser(description='A visual question answerer.')
    # optional arguments
    argparser.add_argument('--load', type=str,
                           help='Filename of existing model, default None')
    argparser.add_argument('--test', type=str,
                           help='Filename of test data, default qa.894.raw.test.txt')
    argparser.add_argument('--q', type=str,
                           help='Pose one question for the visual question answerer')
    argparser.add_argument('--e', type=int,
                           help='Number of epochs, default 1')
    argparser.add_argument('--ld1', type=int,   
                           help='Latent dimension 1, default 512')
    argparser.add_argument('--b', type=int,
                           help='Batch size, default 32')
    argparser.add_argument('--drop', type=float,
                           help='Dropout percentage, default 0.5')
    argparser.add_argument('--aeweight', type=float,
                           help='Weight of the autoencoder loss function compared to the answer loss function, default 1.0')
    argparser.add_argument('--wups', action="store_true",
                           help='Compute the WUPS Score')
    argparser.add_argument('--textonly', action="store_true",
                           help='Ignore the images')
    argparser.add_argument('--visualonly', action="store_true",
                           help='Without autoencoder')
    argparser.add_argument('--improve', action="store_true",
                           help='Further train the loaded model')
    argparser.add_argument('--checkpoint', action="store_true",
                           help='Save at every epoch')

    args = argparser.parse_args(argv)


    # Hyper Parameters
    ld1 = 512
    epochs = 1
    batch = 32
    drop = 0.5
    ae_weight = 1.0
    test = "qa.894.raw.test.txt"

    if args.ld1:
        ld1 = args.ld1
    if args.e:
        epochs = args.e
    if args.b:
        batch = args.b
    if args.drop:
        drop = args.drop
    if args.aeweight:
        ae_weight = args.aeweight
    if args.test:
        test = args.test

    print('--e Number of epochs: ' + str(epochs))
    print('--ld1 Latent dimension 1: ' + str(ld1))
    print('--b Batch size: ' + str(batch))
    print('--drop Dropout percentage: ' + str(drop))
    print('--aeweight Autoencoder Loss Weight: ' + str(ae_weight))
    print('')

    InteractiveSession()

    test_x, test_t, qa_answer, qa_question, train_token = model(
        epochs, drop, ae_weight, ld1, batch, args.load, args.q, test,
        args.textonly, args.visualonly, args.improve, args.checkpoint)

    nbtest = len(test_t)

    if args.wups:
        postp.print_wups_acc(test_t, qa_answer, train_token)
        if not args.visualonly:
            postp.print_ae_acc(test_x, qa_question, train_token)
    postp.print_compare(test_x, test_t, qa_answer, qa_question, nbtest,
                        train_token)
Code example #17
import sys
import pickle
from optparse import OptionParser
import time
from keras_frcnn import config
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras_frcnn import roi_helpers
from keras.applications.mobilenet import preprocess_input

from tensorflow import ConfigProto
from tensorflow import InteractiveSession
# Use a distinct name so the "config" module imported from keras_frcnn above is not shadowed
tf_config = ConfigProto()
tf_config.gpu_options.allow_growth = True
session = InteractiveSession(config=tf_config)

sys.setrecursionlimit(40000)

parser = OptionParser()

parser.add_option("-p", "--path", dest="test_path", help="Path to test data.")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
				help="Number of ROIs per iteration. Higher means more memory use.", default=32)
parser.add_option("--config_filename", dest="config_filename", help=
				"Location to read the metadata related to the training (generated when training).",
				default="config.pickle")
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--write", dest="write", help="to write out the image with detections or not.", action='store_true')
parser.add_option("--load", dest="load", help="specify model path.", default=None)
(options, args) = parser.parse_args()
Code example #18
class CNNModel:
    def __init__(self, image_size=[28, 28], char_number=10, channel=1):
        self.image_size = image_size
        self.char_number = char_number
        self.channel = channel
        # self.learning_rate = learning_rate
        # self.eval_every = eval_every
        # self.epochs = epochs
        # self.evaluation_size = evaluation_size
        # self.batch_size = batch_size
        # self.optimizer = optimizer

        self.inputs = placeholder(float32,
                                  shape=[None, image_size[0] * image_size[1]],
                                  name="inputs")

        self.model = reshape(self.inputs,
                             [-1, image_size[0], image_size[1], channel])
        self.labels = placeholder(float32, shape=[None, 10], name="labels")
        self.sess = InteractiveSession()

    def addLayer(self, layer):
        self.model = layer.implement(self.model)

    def addOutputLayer(self, layer):
        self.loss = layer.implement(self.model, self.labels)

    def train(self,
              dataset,
              eval_every=5,
              epochs=500,
              evaluation_size=500,
              batch_size=100,
              optimizer=train.MomentumOptimizer(0.005, 0.9)):
        train_step = optimizer.minimize(self.loss)
        prediction = argmax(self.model, 1, name="prediction")
        result = equal(argmax(self.labels, 1), prediction, name="result")
        self.accuracy = reduce_mean(cast(result, float32), name="accuracy")
        train_loss = []
        train_accuracy = []
        test_accuracy = []

        global_variables_initializer().run()

        for i in range(epochs):
            # Draw batch_size images from the training set
            train_batch = dataset.train.next_batch(batch_size)
            train_dict = {
                self.inputs: train_batch[0],
                self.labels: train_batch[1]
            }
            if i % eval_every == 0:

                # Every eval_every iterations, run one evaluation pass
                test_batch = dataset.test.next_batch(evaluation_size)
                temp_train_loss = self.loss.eval(feed_dict=train_dict)
                temp_train_accuracy = self.accuracy.eval(feed_dict=train_dict)
                temp_test_accuracy = self.accuracy.eval(
                    feed_dict={
                        self.inputs: test_batch[0],
                        self.labels: test_batch[1]
                    })

                print(
                    'Epoch # %d, Train Loss: %g. Train Accuracy (Test Accuracy): %g (%g)'
                    % (i, temp_train_loss, temp_train_accuracy,
                       temp_test_accuracy))

                # Store these values for the plots
                train_loss.append(temp_train_loss)
                train_accuracy.append(temp_train_accuracy)
                test_accuracy.append(temp_test_accuracy)

            # Run one optimization step on the loss
            self.sess.run(train_step, feed_dict=train_dict)

        # Show plots
        loss_per_epoch(epochs, eval_every, train_loss)
        train_test_accuracy(epochs, eval_every, train_accuracy, test_accuracy)

    def test(self, dataset):
        """
            test model with entire mnist_test
        """
        print('test accuracy %g' % self.accuracy.eval(
            feed_dict={
                self.inputs: dataset.test.images,
                self.labels: dataset.test.labels,
            }))

    def save(self, model_path):
        """
            save model to folder in model_path
        """
        saver = train.Saver()
        saver.save(self.sess, model_path)