Example #1
 def downloadTrailer(self):
     """ Download the trailer. """
     # Check whether we already have a trailer
     if (self.curTrailerName):
         log.info("  Trailer already found: %s/%s" % (self.dirPath, self.curTrailerName))
         return None
     # Check we have a trailerUrl
     if (not self.trailerUrl):
         log.info("  TrailerUrl not found for: %s" % (self.title or self.curTitle))
         return None
     # Make sure the path doesn't already exist
     trailerPath = self.newFileNames[0] or self.curFileNames[0]
     trailerPath = trailerPath.replace('.part1', '')
     trailerPath = "%s/%s-trailer.flv" % (self.dirPath, trailerPath[0:-4])
     if (os.path.exists(trailerPath)):
         log.warn("  Filepath already exists: %s" % trailerPath)
         return None
     # All Set, Download It!
     log.info("  Downloading trailer to: %s" % trailerPath)
     if ('traileraddict.com' in self.trailerUrl):
         traileraddict.downloadTrailer(self.trailerUrl, trailerPath)
     elif ('youtube.com' in self.trailerUrl):
         youtube.downloadTrailer(self.trailerUrl, trailerPath)
         
         
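The closing if/elif chain dispatches each URL to a host-specific downloader. A table-driven variant of the same idea, as a minimal sketch (the _download_flv handler is a hypothetical stand-in for the traileraddict/youtube modules):

def _download_flv(url, dest):
    print("downloading %s -> %s" % (url, dest))

DOWNLOADERS = {
    'traileraddict.com': _download_flv,
    'youtube.com': _download_flv,
}

def dispatch_download(trailer_url, trailer_path):
    for domain, handler in DOWNLOADERS.items():
        if domain in trailer_url:
            return handler(trailer_url, trailer_path)
    return None  # unsupported host: nothing downloaded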
Example #2
    def builder_externals(self,pkgs,exclusions=None):
        '''Take an ordered list of LCG_Interface packages, return an
        ordered list of LCG_Interface packages.  If an input package
        has dependencies they will be inserted before the input
        package name.'''
        from util import log

        ret = []
        for pkg in pkgs:
            builder_dir = self.builder_directory(pkg)
            if not builder_dir:
                log.warn('Unable to find builder directory for "%s"'%pkg)
                continue
            builder_pkg = os.path.basename(builder_dir)
            rel_dir = os.path.join('LCG_Builders',builder_pkg)

            externals = self.externals(package=rel_dir,exclusions=exclusions)
            #print 'builder_pkg=',builder_pkg,' has externals:',externals
            for ext in externals:
                if ext == pkg: continue

                ext_dir = self.builder_directory(ext)
                if not ext_dir:
                    log.warn('Unable to find builder directory for "%s"'%ext)

                if ext in ret: continue
                ret.append(ext)
                continue

            if pkg not in ret:
                ret.append(pkg)
            continue

        return ret
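builder_externals is essentially a stable insertion over dependencies: each package's externals are emitted before the package itself, without duplicates. A self-contained sketch of that contract, with deps_of standing in for the externals lookup:

def order_with_deps(pkgs, deps_of):
    """Insert each package's dependencies before the package itself, once each."""
    ret = []
    for pkg in pkgs:
        for dep in deps_of(pkg) or []:
            if dep != pkg and dep not in ret:
                ret.append(dep)
        if pkg not in ret:
            ret.append(pkg)
    return ret

# 'b' depends on 'a', so 'a' comes first:
print(order_with_deps(['b', 'c'], {'b': ['a'], 'c': []}.get))  # ['a', 'b', 'c']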
Example #3
 def _idxSubtitlesOK(self, dirPath, fileName):
     """ Return subtitles if the idx, sub names match up, otherwise []. """
     subPath = "%s/%s.sub" % (dirPath, fileName[0:-4])
     idxPath = "%s/%s.idx" % (dirPath, fileName[0:-4])
     if (not os.path.exists(subPath) or not os.path.exists(idxPath)):
         log.warn("  Subtitle Error: %s/%s" % (dirPath, fileName))
         return False
     return True
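The fileName[0:-4] slice assumes a three-character extension. A sketch of the same check using os.path.splitext, which handles extensions of any length:

import os

def idx_sub_pair_ok(dir_path, file_name):
    base = os.path.splitext(file_name)[0]
    return all(os.path.exists(os.path.join(dir_path, base + ext))
               for ext in ('.sub', '.idx'))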
Example #4
 def _getNfoInfo(self):
     """ Return ElementTree object if NFO file exists. """
     if (self.curNfoName):
         try:
             nfoPath = "%s/%s" % (self.dirPath, self.curNfoName)
             return ElementTree.parse(nfoPath)
         except Exception as e:
             log.warn("  Invalid NFO file: %s; %s" % (nfoPath, e))
Example #5
 def _rename(self, src, dst):
     """ Rename the specified file. """
     if (src != dst):
         if (os.path.exists(dst)):
             log.warn("  Path already exists: %s" % dst)
             return None
         log.info("  >> Renaming: %s" % src)
         log.info("           to: %s" % dst)
         os.rename(src, dst)
         
Example #6
 def _getImdbInfoFromUrl(self, imdbUrl, logIt=True):
     """ Search IMDB For the movieID's info. """
     try:
         if (not imdbUrl): return None
         if (logIt): log.fine("  Looking up movie: %s" % imdbUrl)
         matches = re.findall(IMDB_REGEX, imdbUrl)
         if (not matches): return None  # URL does not contain a movie ID
         return imdbpy.get_movie(matches[0])
     except imdb.IMDbDataAccessError:
         log.warn("  IMDB Data Access Error: %s" % imdbUrl)
         return None
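re.search with an explicit None check is another safe shape for this lookup, since an unguarded re.findall(...)[0] raises IndexError on a non-matching URL. A sketch, assuming a typical IMDb ID pattern (the real IMDB_REGEX constant is defined elsewhere in the project):

import re

IMDB_REGEX = r'tt(\d+)'  # assumed pattern, for illustration only

def movie_id_from_url(imdb_url):
    match = re.search(IMDB_REGEX, imdb_url or '')
    return match.group(1) if match else None

print(movie_id_from_url('http://www.imdb.com/title/tt0111161/'))  # 0111161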
Example #7
  def _initialize_keys(self):
    init_dir(self.key_dir)
    priv = os.path.join(self.key_dir, 'sdb.private')
    pub = os.path.join(self.key_dir, 'sdb.public')
    try:
      self.priv_key = M2Crypto.RSA.load_key(priv)
      self.pub_key = M2Crypto.RSA.load_pub_key(pub)
    except Exception:  # key files missing or unreadable
      log.warn('Failed to load keys.  Regenerating...')
      self.priv_key = self._generate_pki_keys(priv, pub)

      mem = M2Crypto.BIO.MemoryBuffer()
      self.priv_key.save_key_bio(mem, cipher=None)
Example #8
 def _getVideoFiles(self):
     """ Return the AVI files that make up this video. """
     videoFiles = []
     for fileName in os.listdir(self.dirPath):
         filePath = "%s/%s" % (self.dirPath, fileName)
         test1 = SAMPLE_STRING not in fileName.lower()
         test2 = os.path.getsize(filePath) >= MIN_VIDEO_MB
         # any() avoids appending the same file once per matching extension
         test3 = any(fileName.lower().endswith(ext) for ext in VIDEO_EXTENSIONS)
         if (test1 and test2 and test3):
             videoFiles.append(fileName)
     if (not videoFiles):
         log.warn("  No video files found for: %s" % self.dirPath)
     return sorted(videoFiles)
Example #9
  def __init__(self, conf, bucket, file_name, crypto_helper):
    self.bucket = bucket

    self.crypto = crypto_helper
    self.dir = self.bucket.create_dir(_hash_path(file_name))
    self.conf = conf
    self.enc_aes_keys = dict()

    try:
      self.load_key_file()
    except FileNotFound:
      log.warn("Key files not present. Creating new ones.")
      # Create a new key for this bucket, and upload.
      self.aes_key = self.crypto.generate_aes_key()
      log.info("New AES key (base64): %s" % base64.encodestring(self.aes_key))
      self.enc_aes_keys[self.conf['email_address']] = base64.encodestring(
        self.crypto.encrypt_aes_key(self.aes_key))
      self.flush_key_file()
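This constructor implements envelope encryption: one AES key per bucket, wrapped with each recipient's key and stored base64-encoded. A minimal sketch of that key bookkeeping, with a stub in place of the real crypto_helper:

import base64
import os

class StubCrypto(object):
    """Hypothetical stand-in for the crypto_helper above."""
    def generate_aes_key(self):
        return os.urandom(32)
    def encrypt_aes_key(self, key):
        return key[::-1]  # placeholder transform -- NOT real encryption

crypto = StubCrypto()
aes_key = crypto.generate_aes_key()
enc_aes_keys = {'user@example.com':
                base64.b64encode(crypto.encrypt_aes_key(aes_key))}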
Example #10
    def __init__(self, config, dataset):
        self.config = config
        self.train_dir = config.train_dir
        log.info("self.train_dir = %s", self.train_dir)

        # --- input ops ---
        self.batch_size = config.batch_size

        self.dataset = dataset

        check_data_id(dataset, config.data_id)
        _, self.batch = create_input_ops(dataset,
                                         self.batch_size,
                                         data_id=config.data_id,
                                         is_training=False,
                                         shuffle=False)

        # --- create model ---
        self.model = Model(config)

        self.global_step = tf.contrib.framework.get_or_create_global_step(
            graph=None)
        self.step_op = tf.no_op(name='step_no_op')

        tf.set_random_seed(1234)

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1},
        )
        self.session = tf.Session(config=session_config)

        # --- checkpoint and monitoring ---
        self.saver = tf.train.Saver(max_to_keep=100)

        self.checkpoint_path = config.checkpoint_path
        if self.checkpoint_path is None and self.train_dir:
            self.checkpoint_path = tf.train.latest_checkpoint(self.train_dir)
        if self.checkpoint_path is None:
            log.warn("No checkpoint is given. Just random initialization :-)")
            self.session.run(tf.global_variables_initializer())
        else:
            log.info("Checkpoint path : %s", self.checkpoint_path)
Example #11
    def extract(self):
        log.infov('vfeat extraction start')

        fetches = {
            'vfeat': self.model.outputs['V_ft'],
            'box': self.batch['box'],
            'normal_box': self.batch['normal_box'],
            'num_box': self.batch['num_box'],
            'image_id': self.batch['image_id'],
            'image_id_len': self.batch['image_id_len']
        }
        f = h5py.File(self.save_path, 'w')
        data_info = f.create_group('data_info')
        data_info['pretrained_param_path'] = \
            self.pretrained_param_path.replace('/', '-')
        data_info['max_box_num'] = np.array(self.data_cfg.max_roi_num,
                                            dtype=np.int32)
        vfeat_dim = 0
        for it in tqdm(range(self.num_iter), desc='extract feature'):
            try:
                res = self.session.run(fetches)
            except tf.errors.OutOfRangeError:
                log.warn('OutOfRangeError happens at {} iter'.format(it + 1))
                break  # the input queue is exhausted; stop iterating
            else:
                for b in range(res['vfeat'].shape[0]):
                    image_id = ''.join(
                        res['image_id'][b, :res['image_id_len'][b]])
                    num_box = res['num_box'][b]
                    vfeat = res['vfeat'][b, :num_box]
                    box = res['box'][b, :num_box]
                    normal_box = res['normal_box'][b, :num_box]

                    grp = f.create_group(image_id)
                    grp['num_box'] = num_box
                    grp['vfeat'] = vfeat
                    grp['box'] = box
                    grp['normal_box'] = normal_box
                    vfeat_dim = vfeat.shape[1]
        data_info['vfeat_dim'] = np.array(vfeat_dim, dtype=np.int32)
        log.infov('iteration terminated at [{}/{}] iter'.format(
            it + 1, self.num_iter))
        f.close()
        log.warn('vfeat extraction is done: {}'.format(self.save_path))
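The loop's try/except/else shape -- process a batch unless the source is exhausted -- generalizes beyond TensorFlow. A sketch with StopIteration standing in for tf.errors.OutOfRangeError:

def drain(next_batch, process):
    """Consume batches until the source runs out; return the count."""
    it = 0
    while True:
        try:
            batch = next_batch()
        except StopIteration:
            break
        else:
            process(batch)
            it += 1
    return it

batches = iter([[1, 2], [3, 4]])
print(drain(lambda: next(batches), lambda b: None))  # 2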
Example #12
    def eval_run(self, config):
        # load checkpoint
        if self.checkpoint_path:
            self.saver.restore(self.session, self.checkpoint_path)
            log.info("Loaded from checkpoint!")

        log.infov("Start 1-epoch Inference and Evaluation")

        log.info("# of examples = %d", len(self.dataset))
        length_dataset = len(self.dataset)

        max_steps = int(length_dataset / self.batch_size) + 1
        log.info("max_steps = %d", max_steps)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord,
                                               start=True)

        evaler = EvalManager(config)
        try:
            for _ in xrange(max_steps):
                batch_chunk, prediction_pred_real, prediction_pred_fake, prediction_gt = \
                    self.run_single_step(self.batch)
                evaler.add_batch(batch_chunk['id'], prediction_pred_real,
                                 prediction_pred_fake, prediction_gt)

        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))

        if not os.path.exists(self.checkpoint_path):
            os.makedirs(self.checkpoint_path)
        evaler.report(self.checkpoint_path)
        tf.reset_default_graph()
        self.session.close()
        log.infov("Evaluation complete.")
Example #13
        def deps(use,names):
            for dep in use.uses:
                #print '"%s" uses "%s" [%s]'%(use.name,dep.name,str(dep))
                if dep.project != 'lcgcmt' \
                        or dep.directory != 'LCG_Interfaces':
                    continue
                if dep.name in exclusions:
                    log.warn('Skipping excluded pkg "%s" needed by "%s"'%(dep.name,use.name))
                    continue
                deps(dep,names)
                if dep.name not in names:
                    names.append(dep.name)
                    log.info('Adding "%s" needed by "%s"'%(dep.name,use.name))
                continue
            if use.project == 'lcgcmt' \
                    and use.directory == 'LCG_Interfaces' \
                    and use.name not in names:
                names.append(use.name)

            return
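deps() is a post-order walk: every dependency is appended before the package that needs it, and the names list doubles as a visited set. The core recursion in isolation:

def postorder(node, children, out):
    """Append node after all of its transitive dependencies, once each."""
    for child in children(node):
        postorder(child, children, out)
    if node not in out:
        out.append(node)

order = []
postorder('app', {'app': ['libA', 'libB'], 'libA': ['libB'], 'libB': []}.get, order)
print(order)  # ['libB', 'libA', 'app']

Like the original, this assumes the dependency graph is acyclic.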
Example #14
    def share_word_vocabulary_from(self, dataset):
        assert hasattr(dataset, 'idx2word') and hasattr(
            dataset, 'word2idx'
        ), 'The dataset instance should have idx2word and word2idx'
        assert (
            isinstance(dataset.idx2word, dict)
            or isinstance(dataset.idx2word, list)
        ) and isinstance(
            dataset.word2idx, dict
        ), 'The dataset instance should have idx2word and word2idx (as dict)'

        if hasattr(self, 'word2idx'):
            log.warn("Overriding %s' word vocabulary from %s ...", self,
                     dataset)

        self.idx2word = dataset.idx2word
        self.word2idx = dataset.word2idx
        self.ans2idx = dataset.ans2idx
        self.idx2ans = dataset.idx2ans
        if hasattr(dataset, 'word_matrix'):
            self.word_matrix = dataset.word_matrix
Example #15
        def f_phi(g, scope='f_phi'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                fc_1 = fc(g, 256, name='fc_1')
                fc_1 = slim.dropout(fc_1, keep_prob=0.5, is_training=is_train, scope='fc_2/')

                fc_a_2 = fc(fc_1, 256, name='fc_a_2')
                fc_a_2 = slim.dropout(fc_a_2, keep_prob=0.5, is_training=is_train, scope='fc_a_3/')
                fc_a_3 = fc(fc_a_2, 256, name='fc_a_3')
                fc_a_3 = slim.dropout(fc_a_3, keep_prob=0.5, is_training=is_train, scope='fc_a_4/')
                fc_a_4 = fc(fc_a_3, n, activation_fn=None, name='fc_a_4')

                if self.config.location:
                    fc_r_2 = fc(fc_1, 256, name='fc_r_2')
                    fc_r_2 = slim.dropout(fc_r_2, keep_prob=0.5, is_training=is_train, scope='fc_r_3/')
                    fc_r_3 = fc(fc_r_2, 256, name='fc_r_3')
                    fc_r_3 = slim.dropout(fc_r_3, keep_prob=0.5, is_training=is_train, scope='fc_r_4/')
                    rfc_3 = fc(fc_r_3, self.l_dim, activation_fn=None, name='pred_x_y')
                    return fc_a_4, rfc_3

                return fc_a_4
Example #16
        def Encoder(input_image, pose, scope='Encoder', reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warn(scope.name)
                _ = add_image_coord(input_image)

                # Pose stack
                h, w = int(_.get_shape()[1]), int(_.get_shape()[2])
                pose = tf.reshape(pose, [self.batch_size, 1, 1, -1])
                pose = tf.tile(pose, [1, h, w, 1], name='pose_map')
                _ = tf.concat([_, pose], axis=-1, name='concat_pose_img')
                if not reuse: log.info('{} {}'.format(
                    _.name, _.get_shape().as_list()))

                # Conv
                all_output = []
                for i in range(self.num_conv_layer):
                    _ = conv2d(_, 2 ** (i+5), is_train, k_h=4, k_w=4,
                               info=not reuse, norm=self.norm_type,
                               name='conv{}'.format(i+1))
                    all_output.append(_)
                return all_output
Example #17
 def _getSubtitles(self):
     """ Return subtitle files for this video. """
     subtitles = []
     for path in SUBTITLE_DIRS:
         dirPath = "%s/%s" % (self.dirPath, path)
         if (os.path.exists(dirPath)):
             for fileName in os.listdir(dirPath):
                 if (fileName.endswith('.srt')):
                     self.subsFound = True
                     subtitles.append("%s/%s" % (path, fileName))
                 elif (fileName.endswith('.idx')):
                     self.subsFound = True
                     if (self._idxSubtitlesOK(dirPath, fileName)):
                         subtitles.append("%s/%s" % (path, fileName))
                 elif (fileName.endswith('.sub')):
                     self.subsFound = True
     # Check we have same number of subtitles as video files
     if (subtitles) and (len(subtitles) != len(self.curFileNames)):
         log.warn("  Mismatch between len(videos) and len(subtitles): %s" % self.dirPath)
         return None
     return sorted(subtitles)
Example #18
    def __init__(self, config):
        """
        creates train directory where data results are saved and stores model
        configurations as .json

        """

        if config.train_dir is None:
            import tempfile
            tempd = tempfile.mkdtemp(
                prefix='tmp-' + time.strftime('%Y%m%d-%H%M%S') + '-',
                suffix='-' +
                urlify(config.train_tag) if config.train_tag else '')
            log.info("Using temp dir {}".format(tempd))

        self.train_dir = config.train_dir or tempd
        self.writer = tf.summary.FileWriter(
            self.train_dir, self.session.graph
        )  # nice tf way of writing output without slowing down training
        log.warn("Train dir : %s", self.train_dir)

        config_file = os.path.join(self.train_dir, 'config.json')
        if not os.path.exists(config_file):
            self.config.dump(config_file)
        else:
            log.warn("config_file %s already exists (skipped)", config_file)

        config_pkl = os.path.join(self.train_dir, 'config.pkl')
        if not os.path.exists(config_pkl):
            with open(config_pkl, 'wb') as f:
                pkl.dump(config, f)
        else:
            log.warn("config_file %s already exists (skipped)", config_pkl)
Example #19
    def build(self, is_train=True):

        # Decoder {{{
        # =========
        # Input: an activity [bs, v]
        # Output: [bs, [x, y, v]]

        D = Decoder_Neuron('Decoder_Neuron', self.output_dim,
                           self.output_act_fn, self.num_d_fc, self.d_norm_type,
                           is_train)

        pred_label = D(self.activity)
        self.pred_label = pred_label

        # }}}

        # Build losses {{{
        # =========
        # compute loss
        if self.loss_type == 'l1':
            self.ori_loss = tf.abs(self.label - pred_label)
            self.loss = tf.reduce_mean(self.ori_loss)
        elif self.loss_type == 'l2':
            self.ori_loss = (self.label - pred_label)**2
            self.loss = tf.reduce_mean(self.ori_loss)
        else:
            raise NotImplementedError
        # }}}

        # TensorBoard summaries {{{
        # =========
        tf.summary.scalar("loss/loss", self.loss)
        # }}}

        # Output {{{
        # =========
        self.output = {'pred_label': pred_label}
        # }}}

        log.warn('\033[93mSuccessfully loaded the model.\033[0m')
Example #20
def test163():
	url = 'http://music.163.com/api/playlist/detail?id={}'.format(list_id[0])
	log.info(url)
	log.info('Grab album ...')
	r = requests.get(url)
	data = r.json()
	result = data['result']
	tracks = result['tracks']
	total = len(tracks)

	for i, item in enumerate(tracks):
		log.info('Deal with {}th/{} song'.format(i + 1, total))
		stamp = int(item['album']['publishTime']) / 1000
		t = datetime.fromtimestamp(stamp)

		if isSongExists(item['id']):
			log.warn('The song exists and skip')
			continue

		song = Song(item['id'], item['name'], item['artists'][0]['name'], item['mp3Url'], t, '163')
		song.save()
		delay(1)
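The publishTime field arrives in epoch milliseconds, hence the division by 1000 before datetime.fromtimestamp. The conversion in isolation (float division keeps it correct on any Python version):

from datetime import datetime

publish_time_ms = 1388505600000  # example value; the API reports milliseconds
t = datetime.fromtimestamp(publish_time_ms / 1000.0)
print(t.isoformat())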
Example #21
    def eval_run(self):
        # load checkpoint
        if self.checkpoint_path:
            self.saver.restore(self.session, self.checkpoint_path)
            log.info("Loaded from checkpoint!")

        log.infov("Start 1-epoch Inference and Evaluation")

        log.info("# of examples = %d", len(self.dataset))
        length_dataset = len(self.dataset)

        max_steps = self.config.max_steps
        log.info("max_steps = %d", max_steps)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord,
                                               start=True)

        evaler = PoseEvalManager()
        try:
            for s in xrange(max_steps):
                step, step_time, batch_chunk, prediction_pred, prediction_gt = \
                    self.run_single_step(self.batch)
                self.log_step_message(s, step_time)
                evaler.add_batch(batch_chunk['id'], prediction_pred,
                                 prediction_gt)

        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))  # simply ignore for now

        if self.config.output_file:
            evaler.dump_result(self.config.output_file)
Example #22
    def eval_run(self):
        # load checkpoint
        if self.checkpoint:
            self.saver.restore(self.session, self.checkpoint)
            log.info("Loaded from checkpoint!")

        log.infov("Start 1-epoch Inference and Evaluation")

        log.info("# of examples = %d", len(self.dataset))
        length_dataset = len(self.dataset)

        max_steps = int(length_dataset / self.batch_size) + 1
        log.info("max_steps = %d", max_steps)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord,
                                               start=True)

        evaler = EvalManager()
        try:
            for s in xrange(max_steps):
                step, loss, step_time, batch_chunk, prediction_pred, prediction_gt = \
                    self.run_single_step(self.batch)
                self.log_step_message(s, loss, step_time)
                evaler.add_batch(batch_chunk['id'], prediction_pred,
                                 prediction_gt)

        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))

        evaler.report()
        log.infov("Evaluation complete.")
Example #23
        def CONV(img, q, scope='CONV'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img,
                                conv_info[0],
                                is_train,
                                s_h=3,
                                s_w=3,
                                name='conv_1')
                conv_2 = conv2d(conv_1,
                                conv_info[1],
                                is_train,
                                s_h=3,
                                s_w=3,
                                name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')

                # eq.1 in the paper
                # g_theta = (o_i, o_j, q)
                # conv_4 [B, d, d, k]
                d = conv_4.get_shape().as_list()[1]
                all_g = []
                for i in range(d * d):
                    o_i = conv_4[:, int(i / d), int(i % d), :]
                    o_i = concat_coor(o_i, i, d)
                    for j in range(d * d):
                        o_j = conv_4[:, int(j / d), int(j % d), :]
                        o_j = concat_coor(o_j, j, d)
                        if i == 0 and j == 0:
                            g_i_j = g_theta(o_i, o_j, q, reuse=False)
                        else:
                            g_i_j = g_theta(o_i, o_j, q, reuse=True)
                        all_g.append(g_i_j)

                all_g = tf.stack(all_g, axis=0)
                all_g = tf.reduce_mean(all_g, axis=0, name='all_g')
                return all_g
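The nested loop enumerates all d*d by d*d object pairs before g_theta scores each one. The same pairing, vectorized in NumPy for a single image (random features stand in for conv_4):

import numpy as np

d, k = 4, 24
feat = np.random.rand(d * d, k)  # flattened conv_4 cells
i_idx, j_idx = np.meshgrid(np.arange(d * d), np.arange(d * d), indexing='ij')
pairs = np.concatenate([feat[i_idx.ravel()], feat[j_idx.ravel()]], axis=1)
print(pairs.shape)  # (256, 48): every (o_i, o_j) combination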
Example #24
def check_dataset(file_path, dataset_name):
    assert dataset_name in ['car', 'chair', 'kitti', 'synthia']

    if not osp.isfile(file_path):
        log.warn('The {} dataset is not found. '
                 'Downloading the dataset now...'.format(dataset_name))

        if dataset_name == 'car':
            download_file_from_google_drive('1vrZURHH5irKrxPFuw6e9mZ3wh2RqzFC9',
                                            './datasets/shapenet/data_car.hdf5')
        elif dataset_name == 'chair':
            download_file_from_google_drive('1-IbmdJqi37JozGuDJ42IzOFG_ZNAksni',
                                            './datasets/shapenet/data_chair.hdf5')
        elif dataset_name == 'kitti':
            download_file_from_google_drive('1LT3WoHxdCycu4jTxCGc1vGYpdRWridFH',
                                            './datasets/kitti/data_kitti.hdf5')
        elif dataset_name == 'synthia':
            download_file_from_google_drive('1Fxv5r7oeG0PHgR42S5pHNvyl2pJN739H',
                                            './datasets/synthia/data_synthia.hdf5')
        else:
            raise NotImplementedError
    else:
        log.warn('Found {} dataset at {}'.format(dataset_name, file_path))
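The if/elif chain maps each dataset name to a Google Drive file ID and a destination path, so a lookup table expresses it more compactly (same IDs and paths, reusing the osp and download_file_from_google_drive helpers from the example):

DATASETS = {
    'car': ('1vrZURHH5irKrxPFuw6e9mZ3wh2RqzFC9', './datasets/shapenet/data_car.hdf5'),
    'chair': ('1-IbmdJqi37JozGuDJ42IzOFG_ZNAksni', './datasets/shapenet/data_chair.hdf5'),
    'kitti': ('1LT3WoHxdCycu4jTxCGc1vGYpdRWridFH', './datasets/kitti/data_kitti.hdf5'),
    'synthia': ('1Fxv5r7oeG0PHgR42S5pHNvyl2pJN739H', './datasets/synthia/data_synthia.hdf5'),
}

def check_dataset_table(file_path, dataset_name):
    file_id, dest = DATASETS[dataset_name]  # KeyError replaces the assert
    if not osp.isfile(file_path):
        download_file_from_google_drive(file_id, dest)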
Example #25
    def _run_item(self):
        while self.queue and len(self.active) < self.client_config.max_clients:
            log.warn("^&^^^^^^^^^^^^^^^^^")
            key, connection_item = self.queue.get()
            log.warn((key, connection_item))
            if key not in self.waiting:
                continue

            log.warn("9999999999")
            self._off_waiting_timeout(key)
            self.active[key] = connection_item
            self._handle_connection_item(
                connection_item,
                functools.partial(self._release_connection_item, key))
Example #26
def load_model(args):
    global model, config, session

    assert 0.0 < args.gpu_fraction <= 1.0
    session = tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(
            per_process_gpu_memory_fraction=args.gpu_fraction),
        device_count={'GPU': True},  # NOTE: True counts as 1, so a GPU is still allowed; use 0 to actually force CPU
    ))
    log.warn('Reloading Model ...')

    log.infov('MODEL   : %s', args.model)

    if args.model == 'gaze_grcn':
        from gaze_grcn import GazePredictionGRCN as TheModel
        from gaze_grcn import CONSTANTS, GRUModelConfig
    else:
        raise NotImplementedError(args.model)

    # default configuration as of now
    config = GRUModelConfig()
    config.batch_size = args.batch_size or 14
    config.train_dir = None  #important
    config.n_lstm_steps = 105

    if args.loss_type is not None:  #important
        config.loss_type = args.loss_type
    else:
        config.loss_type = 'xentropy'

    #print(type(config))
    config.dump(sys.stdout)  # causes error at second call for some reason

    # dummy
    data_sets = crc_input_data_seq.CRCDataSplits()
    data_sets.train = data_sets.test = data_sets.valid = []
    log.warn('Dataset Loading Finished ! (%d instances)', len(data_sets))

    model = TheModel(session, data_sets, config)
    print model

    # load checkpoint
    checkpoint_path = os.path.abspath(args.checkpoint_path)

    #assert os.path.isfile(checkpoint_path)  #assertion fails but model shouldn't be loaded with filename extension so this needs to go!
    model.load_model_from_checkpoint_file(checkpoint_path)

    log.warn('Model Loading Done!!')

    return model
Example #27
parser = argparse.ArgumentParser()  # inferred: the original snippet begins mid-call
parser.add_argument('--data_dir',   # flag name inferred from config.data_dir used below
                    type=str,
                    default='data/preprocessed/visualgenome'
                    '/memft_all_new_vocab50_obj3000_attr1000_maxlen10',
                    help=' ')
parser.add_argument('--class_feat_dim', type=int, default=2048, help=' ')
parser.add_argument('--checkpoint',
                    type=str,
                    required=True,
                    help='ex) ./model-1')
config = parser.parse_args()

ckpt_dir = os.path.dirname(config.checkpoint)
ckpt_name = os.path.basename(config.checkpoint)
config.save_dir = os.path.join(ckpt_dir, 'word_weights_{}'.format(ckpt_name))
if not os.path.exists(config.save_dir):
    log.warn('create directory: {}'.format(config.save_dir))
    os.makedirs(config.save_dir)
else:
    raise ValueError('Do not overwrite: {}'.format(config.save_dir))

vocab_path = os.path.join(config.data_dir, 'vocab.pkl')
vocab = cPickle.load(open(vocab_path, 'rb'))

answer_dict_path = os.path.join(config.data_dir, 'answer_dict.pkl')
answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
num_answer = len(answer_dict['vocab'])

v_word_map = modules.LearnGloVe(vocab, scope='V_GloVe')
l_word_map = modules.LearnGloVe(vocab, scope='L_GloVe')
l_answer_word_map = modules.LearnAnswerGloVe(answer_dict)
Example #28
    def eval_run(self):
        # load checkpoint
        if self.checkpoint:
            self.saver.restore(self.session, self.checkpoint)
            log.info("Loaded from checkpoint!")

        log.infov("Start Inference and Evaluation")

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord,
                                               start=True)

        use_test_id_list = self.config.data_id_list is not None

        try:
            if use_test_id_list:
                with open(self.config.data_id_list, 'r') as id_list_path:
                    id_list = id_list_path.readlines()
                self.id_list = [id.strip().split(' ') for id in id_list]

            if self.config.plot_image:
                if not os.path.exists(self.config.output_dir):
                    os.makedirs(self.config.output_dir)

            if self.config.loss or self.config.plot_image:
                loss_all = []
                time_all = 0
                step = None
                s = 0
                continue_evaluate = True
                while continue_evaluate:
                    # get testing batch
                    if use_test_id_list:
                        batch_id_list = self.id_list[self.batch_size *
                                                     s:self.batch_size *
                                                     (s + 1)]
                        batch_chunk = self.get_batch_chunk(batch_id_list)
                    else:
                        batch_chunk = self.get_batch_chunk()

                    # inference
                    step, loss, img, batch_id, step_time = \
                        self.run_single_step(batch_chunk, step=s)

                    # plot images
                    if self.config.plot_image:
                        if use_test_id_list:
                            for i in range(self.batch_size):
                                for img_key in img.keys():
                                    model_name = batch_id_list[i][0].split(
                                        '_')[0]
                                    target_id = '_'.join(
                                        batch_id_list[i][0].split('_')[1:])
                                    source_id = '-'.join([
                                        '_'.join(id.split('_')[1:])
                                        for id in batch_id_list[i][1:]
                                    ])
                                    img_name = '{}_target_{}_source_{}_{}.png'.format(
                                        model_name, target_id, source_id,
                                        img_key)
                                    if self.config.plot_image:
                                        imageio.imwrite(
                                            osp.join(self.config.output_dir,
                                                     img_name),
                                            img[img_key][i])
                        else:
                            raise ValueError(
                                'Plotting images requires an id list.')

                    loss_all.append(np.array(list(loss.values())))  # list() keeps this Py3-safe
                    time_all += step_time

                    s += 1
                    if use_test_id_list:
                        continue_evaluate = s < len(
                            self.id_list) / self.batch_size
                    else:
                        continue_evaluate = s < self.config.max_steps

                    # report loss
                    if not self.config.quiet:
                        loss_avg = np.average(np.stack(loss_all), axis=0)
                        self.log_message(
                            s,
                            loss_avg,
                            loss.keys(),
                            time_all,
                            write_summary=self.config.write_summary,
                            summary_file=self.config.summary_file,
                            final_step=not continue_evaluate,
                        )

        except Exception as e:
            coord.request_stop(e)

        log.warning('Completed Evaluation.')

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))
Example #29
    def _read_message(self, connection_item):
        try:

            if not self.connection.is_avaliable_stream():
                log.error("Malformed Client Request stream closed")
                raise gen.Return(False)

            #: read header data
            header_data_future = self.connection.stream.read_until_regex(
                regex=message_utils.header_delimiter,
                max_bytes=connection_item.header_max_bytes)

            if connection_item.header_timeout is None:
                self._header_data = yield header_data_future
            else:
                try:
                    self._header_data = yield gen.with_timeout(
                        timeout=self._io_loop.time() +
                        connection_item.header_timeout,
                        future=header_data_future,
                        io_loop=self._io_loop)
                except gen.TimeoutError:
                    log.error("Timeout reading header from {}".format(
                        self.client_config.address_str))
                    raise gen.Return(False)

            #: parse header data
            try:
                header_tube = message_utils.parse_header(
                    CONNECTION_TYPE_IN_REQUEST, self._header_data)

            except RPCInputError as e:
                log.warn(e.error)
                raise gen.Return(False)

            self._message.topic = header_tube.topic

            #: read body data
            body_data_future = self.connection.stream.read_bytes(
                header_tube.body_len + len(message_utils.body_suffix))

            if connection_item.body_timeout is None:
                self._body_data = yield body_data_future
            else:
                try:
                    self._body_data = yield gen.with_timeout(
                        timeout=self._io_loop.time() +
                        connection_item.body_timeout,
                        future=body_data_future,
                        io_loop=self._io_loop)
                except gen.TimeoutError:
                    log.error("Timeout reading body from {}".format(
                        self.client_config.address_str))
                    raise gen.Return(False)

            #: parse body data
            try:
                body_msg = message_utils.parse_body(self._body_data)
            except RPCInputError as e:
                log.error(e.error)
                raise gen.Return(False)

            self._message.body = body_msg

        except StreamClosedError:
            raise gen.Return(False)
        raise gen.Return(True)
Example #30
    def eval_run(self):
        # load checkpoint
        if self.checkpoint_path:
            self.saver.restore(self.session, self.checkpoint_path)
            log.info("Loaded from checkpoint!")

        log.infov("Start Inference and Evaluation")

        log.info("# of testing examples = %d", len(self.dataset))
        length_dataset = len(self.dataset)

        max_steps = int(length_dataset / self.batch_size) + 1

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(self.session,
                                               coord=coord, start=True)

        evaler = EvalManager()

        if not (self.config.recontrain or self.config.interpolate or self.config.generate or self.config.reconstruct):
            raise ValueError('Please specify at least one task by indicating '
                             '--recontrain, --reconstruct, --generate, or --interpolate.')

        if self.config.recontrain:
            try:
                loss_z_update = 100000
                s = 0
                max_steps = 100000
                min_loss = 10000000
                for s in xrange(max_steps):
                    step, z, loss_g_update, loss_z_update, batch_chunk, step_time = \
                        self.run_single_z_update_step(self.batch, self.dataset, step=s, is_train=False)
                    if (loss_z_update < min_loss):
                        min_loss = loss_z_update
                    if s % 10000 == 0:
                        self.log_train_step_message(step, loss_g_update, loss_z_update, min_loss, step_time)
                        m, l = self.dataset.get_data(batch_chunk['id'][0])
                        x_a = self.generator(z)
                        imageio.imwrite('generate_z_batch_step_{}.png'.format(s), x_a[0])
                        imageio.imwrite('original_img_from_batch_img_step_{}.png'.format(s), m)
            except Exception as e:
                coord.request_stop(e)

            log.warning('Completed reconstruction.')
 
        if self.config.reconstruct:
            try:
                for s in xrange(max_steps):
                    step, loss, step_time, batch_chunk, prediction_pred, prediction_gt = \
                        self.run_single_step(self.batch)
                    self.log_step_message(s, loss, step_time)
                    evaler.add_batch(batch_chunk['id'], prediction_pred, prediction_gt)
            except Exception as e:
                coord.request_stop(e)

            evaler.report()
            log.warning('Completed reconstruction.')



        if self.config.generate:
            if (self.config.few_shot_cap > 0):
                z_all = []
                for train_id in self.dataset_train.few_shot_train_ids:
                    imgi, z_x = self.dataset_train.get_data(train_id)
                    imageio.imwrite('original_img_{}.png'.format(train_id), imgi)
                    z_all.append(np.array(z_x))
                for idx in range(len(z_all)):
                    if (idx == len(z_all) - 1):
                        x_a = self.generator(z_all[idx][np.newaxis, :])
                        img_id = self.dataset_train.few_shot_train_ids[idx]
                        imageio.imwrite('generate_{}.png'.format(img_id), self.image_grid(x_a))
                        break

                    # blend consecutive latent codes with fixed weights
                    z_a = np.sum([z_all[idx] * 0.3, z_all[idx + 1] * 0.7], axis=0)
                    z_b = np.sum([z_all[idx] * 0.5, z_all[idx + 1] * 0.5], axis=0)
                    z_c = np.sum([z_all[idx] * 0.7, z_all[idx + 1] * 0.3], axis=0)
                    z_d = np.sum([z_all[idx] * 1, z_all[idx + 1] * 0], axis=0)

                    x_a = self.generator(z_a[np.newaxis, :])
                    fst_img_id = self.dataset_train.few_shot_train_ids[idx]
                    snd_img_id = self.dataset_train.few_shot_train_ids[idx + 1]
                    imageio.imwrite('generate_3_{}_7_{}.png'.format(fst_img_id, snd_img_id), self.image_grid(x_a))
                    x_b = self.generator(z_b[np.newaxis, :])
                    imageio.imwrite('generate_5_{}_5_{}.png'.format(fst_img_id, snd_img_id), self.image_grid(x_b))
                    x_c = self.generator(z_c[np.newaxis, :])
                    imageio.imwrite('generate_7_{}_3_{}.png'.format(fst_img_id, snd_img_id), self.image_grid(x_c))
                    x_d = self.generator(z_d[np.newaxis, :])
                    imageio.imwrite('generate_{}.png'.format(fst_img_id), self.image_grid(x_d))
            elif (self.config.few_shot_cap == 0):
                for test_id in self.dataset._ids:
                    img, z = self.dataset.get_data(test_id)
                    imageio.imwrite('original_{}.png'.format(test_id), img)
                    x = self.generator(z[np.newaxis, :])
                    imageio.imwrite('generate_{}.png'.format(test_id), self.image_grid(x))
            elif (self.config.few_shot_cap is None):
                # TODO: you changed the signature of this method; fix it.
                x = self.generator(self.batch_size)
                img = self.image_grid(x)
                imageio.imwrite('generate_{}.png'.format(self.config.prefix), img)
                log.warning('Completed generation. Generated samples are saved '
                            'as generate_{}.png'.format(self.config.prefix))

        if self.config.interpolate:
            x = self.interpolator(self.dataset_train, self.batch_size)
            img = self.image_grid(x)
            imageio.imwrite('interpolate_{}.png'.format(self.config.prefix), img)
            log.warning('Completed interpolation. Interpolated samples are saved '
                        'as interpolate_{}.png'.format(self.config.prefix))

        coord.request_stop()
        try:
            coord.join(threads, stop_grace_period_secs=3)
        except RuntimeError as e:
            log.warn(str(e))

        log.infov("Completed evaluation.")
Example #31
    def __init__(self, config, dataset, dataset_test):
        self.config = config
        hyper_parameter_str = config.dataset + '_lr_' + str(
            config.learning_rate) + '_update_G' + str(
                config.update_rate) + '_D' + str(1)
        self.train_dir = './train_dir/%s-%s-%s' % (
            config.prefix, hyper_parameter_str, time.strftime("%Y%m%d-%H%M%S"))

        if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)
        log.infov("Train Dir: %s", self.train_dir)

        # --- input ops ---
        self.batch_size = config.batch_size

        _, self.batch_train = create_input_ops(dataset,
                                               self.batch_size,
                                               is_training=True)
        _, self.batch_test = create_input_ops(dataset_test,
                                              self.batch_size,
                                              is_training=False)

        # --- create model ---
        self.model = Model(config)

        # --- optimizer ---
        self.global_step = tf.contrib.framework.get_or_create_global_step(
            graph=None)
        self.learning_rate = config.learning_rate
        if config.lr_weight_decay:
            self.learning_rate = tf.train.exponential_decay(
                self.learning_rate,
                global_step=self.global_step,
                decay_steps=10000,
                decay_rate=0.5,
                staircase=True,
                name='decaying_learning_rate')

        self.check_op = tf.no_op()

        # --- checkpoint and monitoring ---
        all_vars = tf.trainable_variables()

        d_var = [v for v in all_vars if v.name.startswith('Discriminator')]
        log.warn("********* d_var ********** ")
        slim.model_analyzer.analyze_vars(d_var, print_info=True)

        g_var = [v for v in all_vars if v.name.startswith('Generator')]
        log.warn("********* g_var ********** ")
        slim.model_analyzer.analyze_vars(g_var, print_info=True)

        rem_var = (set(all_vars) - set(d_var) - set(g_var))
        print([v.name for v in rem_var])
        assert not rem_var

        self.d_optimizer = tf.contrib.layers.optimize_loss(
            loss=self.model.d_loss,
            global_step=self.global_step,
            learning_rate=self.learning_rate * 0.5,
            optimizer=tf.train.AdamOptimizer(beta1=0.5),
            clip_gradients=20.0,
            name='d_optimize_loss',
            variables=d_var)

        self.g_optimizer = tf.contrib.layers.optimize_loss(
            loss=self.model.g_loss,
            global_step=self.global_step,
            learning_rate=self.learning_rate,
            optimizer=tf.train.AdamOptimizer(beta1=0.5),
            clip_gradients=20.0,
            name='g_optimize_loss',
            variables=g_var)

        self.summary_op = tf.summary.merge_all()

        self.saver = tf.train.Saver(max_to_keep=100)
        self.summary_writer = tf.summary.FileWriter(self.train_dir)

        self.checkpoint_secs = 600  # 10 min

        self.supervisor = tf.train.Supervisor(
            logdir=self.train_dir,
            is_chief=True,
            saver=None,
            summary_op=None,
            summary_writer=self.summary_writer,
            save_summaries_secs=300,
            save_model_secs=self.checkpoint_secs,
            global_step=self.global_step,
        )

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1},
        )
        self.session = self.supervisor.prepare_or_wait_for_session(
            config=session_config)

        self.ckpt_path = config.checkpoint
        if self.ckpt_path is not None:
            log.info("Checkpoint path: %s", self.ckpt_path)
            self.pretrain_saver.restore(self.session, self.ckpt_path)
            log.info(
                "Loaded the pretrain parameters from the provided checkpoint path"
            )
Example #32
        def CONV(img, q, scope='CONV'):
            with tf.variable_scope(scope) as scope:

                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')


##### Edited version
                net = conv_4 # [16, 4, 4, 24]
                shape = net.get_shape().as_list()
                self.d, d = shape[2], shape[2]

                _ = tf.range(1, delta=1.0 / d)  # 1.0/d: true division, so the grid spans [0, 1) even on Python 2
                g = tf.stack( tf.meshgrid(_, _), -1 )[None] # [1, 4, 4, 2]
                # print ('\ng', g.shape, '\n') # after tile: g.shape = [shape[0], 4, 4, 2] = [16, 4, 4, 2]
                net = tf.concat([ net, tf.tile(g, [shape[0],1,1,1]) ], axis=-1) # (16, 4, 4, 26=24+2)

                # TODO: flatten
                all_singletons = [ net[:, int(i / d), int(i % d), :] for i in range(d*d) ]  # (16, batch, 26)

                all_pairs = [ tf.concat([all_singletons[i], all_singletons[j], q], axis=-1)  # q: (batch, 11)
                            for i in range(d*d) for j in range(d*d) ] # (256=16*16, batch, 63) 11+26+26=63

                net = tf.concat(all_pairs, axis=0) # net=(4096, 63)
                # print ('\nnet cat', net.shape, '\n')

                g_1 = fc(net, 256, activation_fn=tf.nn.relu)
                g_2 = fc(g_1, 256, activation_fn=tf.nn.relu)
                g_3 = fc(g_2, 256, activation_fn=tf.nn.relu)
                # print ('\ng_3', g_3.shape, '\n')

                net = tf.reshape(g_3, [d**4, shape[0], 256])
                all_g = net


                all_feature = [tf.concat([all_singletons[i], all_singletons[j]], axis=-1)
                            for i in range(d*d) for j in range(d*d)] # (256, batch, 52)
                all_question = tf.tile(q[None], [d**4, 1, 1]) #  (256, batch, 11)
                # ================================================================

##### End of Edited version
####  Original RN
                # d = conv_4.get_shape().as_list()[1]
                # all_g, all_feature, all_question = [], [], []

                # for i in range(d*d):
                #     o_i = conv_4[:, int(i / d), int(i % d), :]
                #     o_i = concat_coor(o_i, i, d)
                #     for j in range(d*d):
                #         o_j = conv_4[:, int(j / d), int(j % d), :]
                #         o_j = concat_coor(o_j, j, d)

                #         # ================================================================
                #         all_feature.append(tf.concat([o_i, o_j], axis=1))
                #         all_question.append(q)
                #         # ================================================================

                #         if i == 0 and j == 0:
                #             g_i_j = g_theta(o_i, o_j, q, reuse=False)
                #         else:
                #             g_i_j = g_theta(o_i, o_j, q, reuse=True)
                #         all_g.append(g_i_j)

                # all_g = tf.stack(all_g, axis=0)
####  End of Original RN

                # ====================================================================================================
                # Added for weights before the first MLP.

                all_question = tf.stack(all_question, axis=0)
                all_feature = tf.stack(all_feature, axis=0)
                all_shape = all_question.get_shape().as_list() #

                q_len = all_shape[2]

                converted_feature = tl.fully_connected(\
                    all_feature, q_len, reuse=tf.AUTO_REUSE, scope='convert_fc', activation_fn=tf.nn.tanh)

                # weights1, weights2 [d*d, batch, 1]
                # all_question [d*d, batch, 11]

                weights_1 = get_weights(all_question, converted_feature)

                weighted_feature_1 = tf.multiply(weights_1, converted_feature, name="weight_feature_1") * q_len

                weights_2 = get_weights(all_question, weighted_feature_1)

                weighted_feature_2 = tf.multiply(weights_2, weighted_feature_1, name="weight_feature_2") * q_len

                weights_3 = get_weights(all_question, weighted_feature_2)

                # check_tensor(all_g, "all_g") [d*d, batch, concated_feature_dim]

                # old_len = all_g.get_shape().as_list()[2]
                # features, questions = tf.split(all_g, [old_len - q_len,q_len], axis=2, name="split_old")
                # weighted_features = tf.multiply(features, weights_1, name='weight_feature_1')
                # final_features = weighted_features * \
                #                     (tf.reduce_mean(features, axis=2, keepdims=True) / \
                #                      tf.reduce_mean(weighted_features, axis=2, keepdims=True))
                # all_g = tf.concat([final_features, questions], axis=2)

                # weight_name = np.arange(1, np.prod(weights_3.get_shape().as_list())+1).reshape(tf.squeeze(weights_3).get_shape().as_list()).tolist()
                # tf.summary.scalar([['wei' + str(i)  for i in j] for j in weight_name], tf.squeeze(weights_3))

                tf.summary.histogram('weights_1', weights_1)
                tf.summary.histogram('weights_2', weights_2)
                tf.summary.histogram('weights_3', weights_3)

                self.weights_1, self.weights_2, self.weights_3 = tf.transpose(weights_1, perm=[1,0,2]), tf.transpose(weights_2, perm=[1,0,2]), tf.transpose(weights_3, perm=[1,0,2])

                all_g = tf.multiply(all_g, weights_3, name="weight_all_g") * all_g.get_shape().as_list()[0]

                # ====================================================================================================

                all_g = tf.reduce_mean(all_g, axis=0, name='all_g')

                return all_g
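The edited CONV tags every spatial cell with its normalized (x, y) position before pairing, replacing the per-pair concat_coor calls of the original RN. The coordinate tagging alone, sketched in NumPy:

import numpy as np

b, d, k = 2, 4, 24
feat = np.random.rand(b, d, d, k)
ticks = np.arange(d) / float(d)                        # 0, 1/d, ..., (d-1)/d
grid = np.stack(np.meshgrid(ticks, ticks), -1)[None]   # [1, d, d, 2]
feat = np.concatenate([feat, np.tile(grid, (b, 1, 1, 1))], axis=-1)
print(feat.shape)  # (2, 4, 4, 26) -- k channels plus 2 coordinate channels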
Example #33
    def __init__(self, config, model, dataset, dataset_type_str):
        self.config = config
        self.model = model
        self.train_dir = config.train_dir
        self.dataset_type_str = dataset_type_str
        self.summary_file = dataset_type_str + '_' + config.summary_file
        self.summary_model_file = dataset_type_str + '_' + config.summary_model_file
        self.summary_indv_file = dataset_type_str + '_' + config.summary_indv_file
        log.infov("Train_dir path = %s", self.train_dir)

        # --- input ops ---
        self.batch_size = config.batch_size
        self.dataset_path = config.dataset_path
        self.dataset = dataset

        self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
        self.step_op = tf.no_op(name='step_no_op')

        # --- vars ---
        self.model_vars = tf.trainable_variables()
        log.warning("********* var ********** ")
        model_vars = slim.model_analyzer.analyze_vars(self.model_vars, print_info=True)
        self.num_model_params = model_vars[0]

        # -- session --
        tf.set_random_seed(1234)

        session_config = tf.ConfigProto(
            allow_soft_placement=True,
            gpu_options=tf.GPUOptions(allow_growth=True),
            device_count={'GPU': 1},
        )
        self.session = tf.Session(config=session_config)

        # --- checkpoint and monitoring ---
        self.saver = tf.train.Saver(max_to_keep=100)

        self.checkpoint = config.checkpoint
        if self.checkpoint is None and self.train_dir:
            self.checkpoint = tf.train.latest_checkpoint(self.train_dir)
            log.infov("Checkpoint path : %s", self.checkpoint)
        elif self.checkpoint is None:
            log.warn("No checkpoint is given. Just random initialization.")
            self.session.run(tf.global_variables_initializer())
        elif self.train_dir:
            self.checkpoint = os.path.join(self.train_dir, self.checkpoint)
            log.infov("Checkpoint path : %s", self.checkpoint)
        else:
            log.infov("Checkpoint path : %s", self.checkpoint)

        # -- directory setup --
        if self.train_dir is None:
            train_dir_base = os.path.basename(os.path.dirname(self.checkpoint))
        else:
            train_dir_base = os.path.basename(self.train_dir)

        checkpoint_base = os.path.basename(self.checkpoint)

        self.val_dir = './val_dir/%s/%s/%s' %(self.config.prefix,
                                              train_dir_base, checkpoint_base)
        print(self.val_dir)
Example #34
    def build(self, is_train=True):
        n = self.a_dim
        conv_info = self.conv_info

        # build loss and accuracy {{{
        def build_loss(logits, labels):
            # Cross-entropy loss
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return tf.reduce_mean(loss), accuracy
        # }}}

        def concat_coor(o, i, d):
            coor = tf.tile(tf.expand_dims(
                [float(int(i / d)) / d, (i % d) / d], axis=0), [self.batch_size, 1])
            o = tf.concat([o, tf.to_float(coor)], axis=1)
            return o

        def g_theta(o_i, o_j, q, scope='g_theta', reuse=True):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warn(scope.name)
                g_1 = fc(tf.concat([o_i, o_j, q], axis=1), 256, name='g_1')
                g_2 = fc(g_1, 256, name='g_2')
                g_3 = fc(g_2, 256, name='g_3')
                g_4 = fc(g_3, 256, name='g_4')
                return g_4

        # Classifier: takes images as input and outputs class label [B, m]
        def CONV(img, q, scope='CONV'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')

                # eq.1 in the paper
                # g_theta = (o_i, o_j, q)
                # conv_4 [B, d, d, k]
                d = conv_4.get_shape().as_list()[1]
                all_g = []
                for i in range(d*d):
                    o_i = conv_4[:, int(i / d), int(i % d), :]
                    o_i = concat_coor(o_i, i, d)
                    for j in range(d*d):
                        o_j = conv_4[:, int(j / d), int(j % d), :]
                        o_j = concat_coor(o_j, j, d)
                        if i == 0 and j == 0:
                            g_i_j = g_theta(o_i, o_j, q, reuse=False)
                        else:
                            g_i_j = g_theta(o_i, o_j, q, reuse=True)
                        all_g.append(g_i_j)

                all_g = tf.stack(all_g, axis=0)
                all_g = tf.reduce_mean(all_g, axis=0, name='all_g')
                return all_g

        def f_phi(g, scope='f_phi'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                fc_1 = fc(g, 256, name='fc_1')
                fc_2 = fc(fc_1, 256, name='fc_2')
                fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
                fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
                return fc_3

        g = CONV(self.img, self.q, scope='CONV')
        logits = f_phi(g, scope='f_phi')
        self.all_preds = tf.nn.softmax(logits)
        self.loss, self.accuracy = build_loss(logits, self.a)

        # Add summaries
        def draw_iqa(img, q, target_a, pred_a):
            fig, ax = tfplot.subplots(figsize=(6, 6))
            ax.imshow(img)
            ax.set_title(question2str(q))
            ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
            return fig

        try:
            tfplot.summary.plot_many('IQA/',
                                     draw_iqa, [self.img, self.q, self.a, self.all_preds],
                                     max_outputs=3,
                                     collections=["plot_summaries"])
        except Exception as e:
            # Plot summaries are optional; skip them if tfplot is unavailable.
            log.warn('Skipping IQA plot summaries: %s', e)

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/cross_entropy", self.loss)
        log.warn('Successfully loaded the model.')
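The double loop in CONV above enumerates all (d*d)^2 ordered cell pairs before feeding them to g_theta. A minimal NumPy-only sketch of that bookkeeping (hypothetical sizes, shapes only, no TensorFlow):

import numpy as np

B, d, k = 2, 4, 24                      # hypothetical batch, grid size, channels
conv_4 = np.random.rand(B, d, d, k)     # stand-in for the last conv feature map
q = np.random.rand(B, 11)               # stand-in question encoding

objects = []
for i in range(d * d):
    o_i = conv_4[:, i // d, i % d, :]                      # [B, k]
    coor = np.tile([[(i // d) / d, (i % d) / d]], (B, 1))  # [B, 2]
    objects.append(np.concatenate([o_i, coor], axis=1))    # [B, k+2]

pairs = [np.concatenate([objects[i], objects[j], q], axis=1)
         for i in range(d * d) for j in range(d * d)]
print(len(pairs), pairs[0].shape)       # 256 (2, 63)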
Exemplo n.º 35
0
    def build(self, is_train=True):

        n = self.a_dim
        conv_info = self.conv_info

        # build loss and accuracy {{{
        def build_loss(logits, labels):
            # Cross-entropy loss
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return tf.reduce_mean(loss), accuracy
        # }}}

        def concat_coor(o, i, d):
            # Append the cell's normalized (row, col) coordinate to each object;
            # explicit float division guards against Python 2 integer division.
            coor = tf.tile(tf.expand_dims(
                [(i // d) / float(d), (i % d) / float(d)], axis=0),
                [self.batch_size, 1])
            o = tf.concat([o, tf.to_float(coor)], axis=1)
            return o

        def g_theta(o_i, o_j, q, scope='g_theta', reuse=True):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warn(scope.name)
                # check_tensor(o_i, name="o_i")  [batch, 26]
                # check_tensor(o_j, name="o_j")  [batch, 26]
                # check_tensor(q, name='q')      [batch, 11]
                g_1 = fc(tf.concat([o_i, o_j, q], axis=1), 256, name='g_1')
                g_2 = fc(g_1, 256, name='g_2')
                g_3 = fc(g_2, 256, name='g_3')
                g_4 = fc(g_3, 256, name='g_4')
                return g_4

        # Classifier: takes images as input and outputs class label [B, m]
        def CONV(img, q, scope='CONV'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')


##### Edited version: vectorized relation computation
                net = conv_4  # [B, d, d, k], e.g. [16, 4, 4, 24]
                shape = net.get_shape().as_list()
                self.d, d = shape[2], shape[2]

                # Normalized grid coordinates in [0, 1): one (row, col) pair per cell.
                ticks = tf.range(1.0, delta=1.0 / d)
                g = tf.stack(tf.meshgrid(ticks, ticks), -1)[None]  # [1, d, d, 2]
                net = tf.concat([net, tf.tile(g, [shape[0], 1, 1, 1])], axis=-1)  # [B, d, d, k+2]

                # Flatten the d x d grid into a list of per-cell object vectors.
                all_singletons = [net[:, i // d, i % d, :] for i in range(d * d)]  # d*d tensors of [B, k+2]

                all_pairs = [tf.concat([all_singletons[i], all_singletons[j], q], axis=-1)  # q: [B, q_len]
                             for i in range(d * d) for j in range(d * d)]  # (d*d)^2 tensors of [B, 2*(k+2)+q_len]

                net = tf.concat(all_pairs, axis=0)  # [(d*d)^2 * B, 2*(k+2)+q_len]

                g_1 = fc(net, 256, activation_fn=tf.nn.relu)
                g_2 = fc(g_1, 256, activation_fn=tf.nn.relu)
                g_3 = fc(g_2, 256, activation_fn=tf.nn.relu)

                all_g = tf.reshape(g_3, [d ** 4, shape[0], 256])  # [(d*d)^2, B, 256]


                all_feature = [tf.concat([all_singletons[i], all_singletons[j]], axis=-1)
                               for i in range(d * d) for j in range(d * d)]  # (d*d)^2 tensors of [B, 2*(k+2)]
                all_question = tf.tile(q[None], [d ** 4, 1, 1])  # [(d*d)^2, B, q_len]
                # ================================================================

##### End of Edited version
####  Original RN
                # d = conv_4.get_shape().as_list()[1]
                # all_g, all_feature, all_question = [], [], []

                # for i in range(d*d):
                #     o_i = conv_4[:, int(i / d), int(i % d), :]
                #     o_i = concat_coor(o_i, i, d)
                #     for j in range(d*d):
                #         o_j = conv_4[:, int(j / d), int(j % d), :]
                #         o_j = concat_coor(o_j, j, d)

                #         # ================================================================
                #         all_feature.append(tf.concat([o_i, o_j], axis=1))
                #         all_question.append(q)
                #         # ================================================================

                #         if i == 0 and j == 0:
                #             g_i_j = g_theta(o_i, o_j, q, reuse=False)
                #         else:
                #             g_i_j = g_theta(o_i, o_j, q, reuse=True)
                #         all_g.append(g_i_j)

                # all_g = tf.stack(all_g, axis=0)
####  End of Original RN

                # ====================================================================================================
                # Question-conditioned attention weights applied before the first MLP.

                # all_question is already stacked to [(d*d)^2, B, q_len] by tf.tile
                # above; only the all_feature list still needs stacking.
                all_feature = tf.stack(all_feature, axis=0)  # [(d*d)^2, B, 2*(k+2)]
                all_shape = all_question.get_shape().as_list()

                q_len = all_shape[2]

                converted_feature = tl.fully_connected(
                    all_feature, q_len, reuse=tf.AUTO_REUSE, scope='convert_fc',
                    activation_fn=tf.nn.tanh)

                # weights_i: [(d*d)^2, B, 1]; each pass re-weights the features,
                # and the * q_len factor compensates for the softmax shrinking
                # the activations.
                weights_1 = get_weights(all_question, converted_feature)
                weighted_feature_1 = tf.multiply(
                    weights_1, converted_feature, name="weight_feature_1") * q_len

                weights_2 = get_weights(all_question, weighted_feature_1)
                weighted_feature_2 = tf.multiply(
                    weights_2, weighted_feature_1, name="weight_feature_2") * q_len

                weights_3 = get_weights(all_question, weighted_feature_2)

                # check_tensor(all_g, "all_g") [d*d, batch, concated_feature_dim]

                # old_len = all_g.get_shape().as_list()[2]
                # features, questions = tf.split(all_g, [old_len - q_len,q_len], axis=2, name="split_old")
                # weighted_features = tf.multiply(features, weights_1, name='weight_feature_1')
                # final_features = weighted_features * \
                #                     (tf.reduce_mean(features, axis=2, keepdims=True) / \
                #                      tf.reduce_mean(weighted_features, axis=2, keepdims=True))
                # all_g = tf.concat([final_features, questions], axis=2)


                tf.summary.histogram('weights_1', weights_1)
                tf.summary.histogram('weights_2', weights_2)
                tf.summary.histogram('weights_3', weights_3)

                self.weights_1 = tf.transpose(weights_1, perm=[1, 0, 2])
                self.weights_2 = tf.transpose(weights_2, perm=[1, 0, 2])
                self.weights_3 = tf.transpose(weights_3, perm=[1, 0, 2])

                # Re-weight the pair features; scaling by the pair count keeps the
                # magnitude of the subsequent mean comparable to unweighted pooling.
                all_g = tf.multiply(all_g, weights_3, name="weight_all_g") * all_g.get_shape().as_list()[0]

                # ====================================================================================================

                all_g = tf.reduce_mean(all_g, axis=0, name='all_g')

                return all_g

        def get_weights(all_q, all_f, scope='WEIGHTS'):
            # all_q, all_f: [(d*d)^2, B, q_len] -> weight: [(d*d)^2, B, 1]
            with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
                q_len = all_q.get_shape().as_list()[2]
                # Additive attention: score = w^T tanh(W_f * f + W_q * q + b),
                # softmax-normalized across all object pairs (axis 0).
                h = tf.nn.tanh(tf.add(
                    tl.fully_connected(all_f, q_len, biases_initializer=None,
                                       activation_fn=None, scope="IA_fc"),
                    tl.fully_connected(all_q, q_len, activation_fn=None,
                                       scope="QA_fc"),
                    name='weight_add'), name='weight_tanh')

                weight = tf.nn.softmax(
                    tl.fully_connected(h, 1, activation_fn=None, scope="to_weight"),
                    axis=0, name="weight_softmax")

            return weight
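        # get_weights is additive (Bahdanau-style) attention over object pairs;
        # since the softmax runs along axis 0, the (d*d)^2 pair weights sum to 1
        # for every batch element. Quick NumPy check (hypothetical shapes):
        #   s = np.random.rand(256, B, 1)
        #   w = np.exp(s) / np.exp(s).sum(axis=0, keepdims=True)
        #   w.sum(axis=0)  # ~1.0 everywhere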


        def f_phi(g, scope='f_phi'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                fc_1 = fc(g, 256, name='fc_1')
                fc_2 = fc(fc_1, 256, name='fc_2')
                fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
                fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
                return fc_3

        g = CONV(self.img, self.q, scope='CONV')
        logits = f_phi(g, scope='f_phi')

        # print(logits.get_shape().as_list())

        self.all_preds = tf.nn.softmax(logits)
        self.loss, self.accuracy = build_loss(logits, self.a)

        # Add summaries
        def draw_iqa(img, q, target_a, pred_a, weights):
            d = self.d
            H, W = img.shape[:2]

            # Average the (d*d)^2 pair weights into a per-cell d x d attention
            # map, symmetrized over the two pair directions.
            weights = weights.reshape(d * d, d * d)
            weights_a2b = np.mean(weights, axis=1).reshape(d, d)
            weights_b2a = np.mean(np.transpose(weights), axis=1).reshape(d, d)
            mean_w = (weights_a2b + weights_b2a) / 2
            mean_w = mean_w / np.max(mean_w)

            fig, ax = tfplot.subplots(figsize=(6, 6))
            # matplotlib's extent order is (left, right, bottom, top).
            ax.imshow(img, extent=[0, W, 0, H])
            mid = ax.imshow(mean_w, cmap='jet',
                            alpha=0.5, extent=[0, W, 0, H])
            fig.colorbar(mid)
            ax.set_title(question2str(q))
            ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
            return fig

        try:
            tfplot.summary.plot_many('IQA/',
                                     draw_iqa, [self.img, self.q, self.a, self.all_preds, self.weights_1],
                                     max_outputs=4,
                                     collections=["plot_summaries"])
        except Exception as e:
            # Plot summaries are optional; skip them if tfplot is unavailable.
            log.warn('Skipping IQA plot summaries: %s', e)

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/cross_entropy", self.loss)
        log.warn('Successfully loaded the model.')
Exemplo n.º 36
0
    def _sending_connection_item(self, connection_item):
        log.warn(("_send_request", connection_item.item.__dict__))
        self.connection.communicate(connection_item.item)
Exemplo n.º 37
0
    def build(self, is_train=True):

        n = self.num_class

        # build loss and accuracy {{{
        def build_loss(d_real, d_real_logits, d_fake, d_fake_logits, label,
                       real_image, fake_image):
            alpha = 0.9
            real_label = tf.concat(
                [label, tf.zeros([self.batch_size, 1])], axis=1)
            fake_label = tf.concat(
                [(1 - alpha) * tf.ones([self.batch_size, n]) / n,
                 alpha * tf.ones([self.batch_size, 1])],
                axis=1)
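            # real_label: one-hot over the n real classes plus a zero "fake"
            # column; fake_label: (1 - alpha)/n mass spread over the real classes
            # and alpha on the fake column, i.e. smoothed (n+1)-class targets.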

            # Discriminator/classifier loss
            s_loss = tf.reduce_mean(huber_loss(label, d_real[:, :-1]))
            d_loss_real = tf.nn.softmax_cross_entropy_with_logits(
                logits=d_real_logits, labels=real_label)
            d_loss_fake = tf.nn.softmax_cross_entropy_with_logits(
                logits=d_fake_logits, labels=fake_label)
            d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)

            # Generator loss
            g_loss = tf.reduce_mean(tf.log(d_fake[:, -1]))

            # Reconstruction term; its weight (recon_weight) is annealed over training.
            g_loss += tf.reduce_mean(huber_loss(
                real_image, fake_image)) * self.recon_weight

            GAN_loss = tf.reduce_mean(d_loss + g_loss)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(d_real[:, :-1], 1),
                                          tf.argmax(self.label, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return s_loss, d_loss_real, d_loss_fake, d_loss, g_loss, GAN_loss, accuracy

        # }}}

        # Generator {{{
        # =========
        G = Generator('Generator', self.h, self.w, self.c, self.norm_type,
                      self.deconv_type, is_train)

        fake_image = G(self.z)
        self.fake_image = fake_image
        # }}}

        # Discriminator {{{
        # =========
        D = Discriminator('Discriminator', self.num_class, self.norm_type,
                          is_train)
        d_real, d_real_logits = D(self.image)
        d_fake, d_fake_logits = D(fake_image)
        self.all_preds = d_real
        self.all_targets = self.label
        # }}}

        # for policy gradient
        self.d_output_q = d_fake[:, -1]
        self.g_output = fake_image
        self.g_weights = G.var_list

        self.S_loss, d_loss_real, d_loss_fake, self.d_loss, self.g_loss, GAN_loss, self.accuracy = \
            build_loss(d_real, d_real_logits, d_fake, d_fake_logits, self.label, self.image, fake_image)

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/GAN_loss", GAN_loss)
        tf.summary.scalar("loss/S_loss", self.S_loss)
        tf.summary.scalar("loss/d_loss", tf.reduce_mean(self.d_loss))
        tf.summary.scalar("loss/d_loss_real", tf.reduce_mean(d_loss_real))
        tf.summary.scalar("loss/d_loss_fake", tf.reduce_mean(d_loss_fake))
        tf.summary.scalar("loss/g_loss", tf.reduce_mean(self.g_loss))
        tf.summary.image("img/fake", fake_image)
        tf.summary.image("img/real", self.image, max_outputs=1)
        tf.summary.image("label/target_real",
                         tf.reshape(self.label, [1, self.batch_size, n, 1]))
        log.warn('\033[93mSuccessfully loaded the model.\033[0m')
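huber_loss comes from the project's ops module and is not shown in this listing; a minimal sketch of a conventional Huber (smooth-L1) loss with delta=1.0, which is one plausible definition under TF1-style ops:

import tensorflow as tf

def huber_loss(labels, predictions, delta=1.0):
    # Quadratic for small residuals, linear for large ones; delta is the crossover.
    residual = tf.abs(predictions - labels)
    quadratic = tf.minimum(residual, delta)
    linear = residual - quadratic
    return 0.5 * tf.square(quadratic) + delta * linear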
Exemplo n.º 38
0
def _func():
    log.warn("here!!!!!!!!!!!!!")
Exemplo n.º 39
0
if config.root_train_dir is None and len(config.train_dirs) == 0:
    raise ValueError('Set either root_train_dir or train_dirs')
if config.root_train_dir is not None and len(config.train_dirs) > 0:
    raise ValueError('Do not set both root_train_dir and train_dirs')

if config.root_train_dir is None:
    all_train_dirs = config.train_dirs
else:
    all_train_dirs = glob.glob(os.path.join(config.root_train_dir, 'vqa_*'))
all_train_dirs = sorted(all_train_dirs)

pure_test_qid2anno_path = os.path.join(config.qa_split_dir,
                                       'pure_test_qid2anno.pkl')
with open(pure_test_qid2anno_path, 'rb') as f:
    pure_test_qid2anno = cPickle.load(f)

log.warn('all_train_dirs:')
for i, train_dir in enumerate(all_train_dirs):
    log.infov('{:02d}: {}'.format(i, train_dir))

for i_train_dir, train_dir in enumerate(all_train_dirs):
    log.warn('[{:02d}] train_dir: {}'.format(i_train_dir, train_dir))

    eval_dirs = glob.glob(
        os.path.join(train_dir, 'model-*_eval_{}_*'.format(config.split)))
    eval_iter2dir = {
        int(e.split('model-')[1].split('_eval')[0]): e
        for e in eval_dirs
    }
    iters = sorted(eval_iter2dir)

    collect_results = defaultdict(list)
Exemplo n.º 40
0
config.vfeat_path = os.path.join(config.tf_record_memft_dir,
                                 'vfeat_bottomup_36.hdf5')
f = h5py.File(config.vfeat_path, 'w')

image_features = f.create_dataset('image_features',
                                  (len(image_id2idx), NUM_BOXES, FEATURE_DIM),
                                  'f')
normal_boxes = f.create_dataset('normal_boxes',
                                (len(image_id2idx), NUM_BOXES, 4), 'f')
num_boxes = np.zeros([len(image_id2idx)], dtype=np.int32)
spatial_features = f.create_dataset('spatial_features',
                                    (len(image_id2idx), NUM_BOXES, 6), 'f')

for file_name in BOTTOM_UP_FILE_NAMES:
    log.warn('process: {}'.format(file_name))

    tsv_in_file = open(os.path.join(config.bottom_up_dir, file_name), 'r+b')

    reader = csv.DictReader(tsv_in_file,
                            delimiter='\t',
                            fieldnames=(int_field + np_field))
    for item in tqdm(reader, desc='processing reader',
                     total=len(image_id2idx)):
        for field in int_field:
            item[field] = int(item[field])
        for field in np_field:
            item[field] = np.frombuffer(base64.decodestring(item[field]),
                                        dtype=np.float32).reshape(
                                            (item['num_boxes'], -1))
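The np_field decoding above assumes every TSV field stores the base64-encoded raw float32 bytes of a (num_boxes, dim) array; a round-trip sketch with hypothetical shapes:

import base64
import numpy as np

arr = np.random.rand(36, 2048).astype(np.float32)  # hypothetical boxes x feature dim
encoded = base64.b64encode(arr.tobytes())          # what a TSV field would hold
decoded = np.frombuffer(base64.b64decode(encoded),
                        dtype=np.float32).reshape(36, -1)
assert np.array_equal(arr, decoded)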
Exemplo n.º 41
0
def self_test(args):
    global model, data_sets

    session = tf.Session(config=tf.ConfigProto(
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
        device_count={'GPU': 0},  # self-testing: no GPU, use CPU
    ))

    log.warn('Loading %s input data ...', args.dataset)
    if args.dataset == 'salicon':
        salicon_data = salicon_input_data.SaliconData(
            98,
            98,
            49,
            49,
            np.float32,
            use_example=False,  # only tens
            use_val_split=True,
        )
        data_sets = salicon_data.build()

    elif args.dataset == 'crc':
        data_sets = crc_input_data.read_crc_data_sets(98,
                                                      98,
                                                      49,
                                                      49,
                                                      np.float32,
                                                      use_cache=True)
    else:
        raise ValueError('Unknown dataset : %s' % args.dataset)

    print('Train: %s' % data_sets.train)
    print('Validation: %s' % data_sets.valid)

    log.warn('Building Model ...')
    # default configuration as of now
    config = BaseModelConfig()
    config.train_dir = args.train_dir
    if args.train_tag:
        config.train_tag = args.train_tag

    config.batch_size = 128
    config.use_flip_batch = True
    #config.initial_learning_rate = 0.03
    config.initial_learning_rate = 0.00003
    config.optimization_method = 'adam'
    config.steps_per_evaluation = 7000  # for debugging

    if args.learning_rate is not None:
        config.initial_learning_rate = float(args.learning_rate)
    if args.learning_rate_decay is not None:
        config.learning_rate_decay = float(args.learning_rate_decay)
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)

    if args.max_steps:
        config.max_steps = int(args.max_steps)

    if args.dataset == 'crc':
        config.batch_size = 2  # because of T~=35
        config.steps_per_evaluation = 200

    config.dump(sys.stdout)

    log.warn('Start Fitting Model ...')
    model = SaliencyModel(session, data_sets, config)
    print(model)

    model.fit()

    log.warn('Fitting Done. Evaluating!')
    model.generate_and_evaluate(data_sets.test)
Exemplo n.º 42
0
    def __init__(self, batch, config, is_train=True, image_features=None):
        self.batch = batch
        self.config = config
        self.image_dir = config.image_dir
        self.is_train = is_train

        self.word_weight_dir = getattr(config, 'pretrain_word_weight_dir',
                                       None)
        if self.word_weight_dir is None:
            log.warn('word_weight_dir is None')

        self.losses = {}
        self.report = {}
        self.mid_result = {}
        self.output = {}
        self.heavy_output = {}
        self.vis_image = {}

        self.vocab = cPickle.load(open(config.vocab_path, 'rb'))
        self.answer_dict = cPickle.load(
            open(os.path.join(config.tf_record_dir, 'answer_dict.pkl'), 'rb'))
        self.num_answer = len(self.answer_dict['vocab'])
        self.num_train_answer = self.answer_dict['num_train_answer']
        self.train_answer_mask = tf.expand_dims(
            tf.sequence_mask(self.num_train_answer, maxlen=self.num_answer,
                             dtype=tf.float32), axis=0)
        self.test_answer_mask = 1.0 - self.train_answer_mask
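        # e.g. with num_train_answer=3 and num_answer=5, train_answer_mask is
        # [[1, 1, 1, 0, 0]] and test_answer_mask is [[0, 0, 0, 1, 1]].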
        self.obj_answer_mask = tf.expand_dims(
            tf.constant(self.answer_dict['is_object'], dtype=tf.float32), axis=0)
        self.attr_answer_mask = tf.expand_dims(
            tf.constant(self.answer_dict['is_attribute'], dtype=tf.float32), axis=0)

        self.glove_map = modules.LearnGloVe(self.vocab)
        self.answer_exist_mask = modules.AnswerExistMask(
            self.answer_dict, self.word_weight_dir)
        self.answer_non_exist_mask = 1.0 - self.answer_exist_mask

        if self.config.debug:
            self.features, self.spatials, self.normal_boxes, self.num_boxes, \
                self.max_box_num, self.vfeat_dim = get_dummy_data()
        elif image_features is None:
            log.infov('loading image features...')
            with h5py.File(config.vfeat_path, 'r') as f:
                self.features = np.array(f.get('image_features'))
                log.infov('feature done')
                self.spatials = np.array(f.get('spatial_features'))
                log.infov('spatials done')
                self.normal_boxes = np.array(f.get('normal_boxes'))
                log.infov('normal_boxes done')
                self.num_boxes = np.array(f.get('num_boxes'))
                log.infov('num_boxes done')
                self.max_box_num = int(f['data_info']['max_box_num'].value)
                self.vfeat_dim = int(f['data_info']['vfeat_dim'].value)
            log.infov('done')
        else:
            self.features = image_features['features']
            self.spatials = image_features['spatials']
            self.normal_boxes = image_features['normal_boxes']
            self.num_boxes = image_features['num_boxes']
            self.max_box_num = image_features['max_box_num']
            self.vfeat_dim = image_features['vfeat_dim']

        self.build()