def tf_encode(container):
        """
        LSB matching algorithm (+-1 embedding).

        :param container: tf tensor of shape (batch_size, width, height, chan)
        :return: container plus a +-1 mask that embeds the message produced by
            BaseStego.get_information into the least significant bits of channel 0
        """
        with tf.variable_scope('Stego'):

            n, width, height, chan = tuple(map(int, container.get_shape()))

            information = BaseStego.get_information(n, 50)
            # logger.debug('Information to hide', information)

            mask = np.zeros(container.get_shape().as_list())

            print('Num of images: %s' % n)
            for img_idx in range(n):
                print(img_idx)

                for i, bit in enumerate(information[img_idx]):
                    ind, jnd = divmod(i, width)

                    if tf.to_int32(container[img_idx, ind, jnd, 0]) % 2 != bit:
                        if np.random.randint(0, 2) == 0:
                            # tf.assign_sub(container[img_idx, ind, jnd, 0], 1)
                            mask[img_idx, ind, jnd, 0] += 1
                        else:
                            # tf.assign_add(container[img_idx, ind, jnd, 0], 1)
                            mask[img_idx, ind, jnd, 0] -= 1

            logger.debug('Finish encoding')
            return tf.add(container, mask)
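A NumPy-only sketch of the same +-1 (LSB matching) embedding, shown for comparison with the graph-mode loop above; `images` and `bits` are hypothetical arrays, not names taken from the original code:

import numpy as np

def lsb_match(images, bits):
    """Embed bits[k] into the parity of channel 0 of image k by randomly
    adding or subtracting 1 wherever the pixel parity differs from the bit.
    Assumes a signed/float dtype so the -1 step cannot wrap around."""
    stego = images.copy()
    n, width = images.shape[0], images.shape[1]
    for img_idx in range(n):
        for i, bit in enumerate(bits[img_idx]):
            ind, jnd = divmod(i, width)
            if int(stego[img_idx, ind, jnd, 0]) % 2 != bit:
                stego[img_idx, ind, jnd, 0] += np.random.choice((-1, 1))
    return stego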
Example #2
def block(blockhash):
    log_ip(request, inspect.stack()[0][3])
    try:
        block = Block.from_json(get_block_from_db(blockhash)).object()
    except Exception as e:
        logger.debug("BLOCK/blockhash: " + str(e))
        return template("error.html")
    return template("block.html", block=block)
Example #3
 def execute_script(self, file_name):
     logger.debug("Executing script: " + file_name)
     print("Executing script: " + file_name)
     with open(file_name) as script:
         for line in script:
             print("\t\t" + line.strip())
             args = line.split()
             self.command_handler("server", args, admin=True)
Example #4
    def get(self, request):
        snippet_qs = Snippet.objects.order_by('-update_dt')
        snippet_list = []
        for snippet in snippet_qs:
            snippet_list.append(snippet.to_simple_dict())

        logger.debug('query snippets|%s', len(snippet_list))
        return SuccessResponse(snippet_list)
Example #5
 def reprocess_api(self):
     logger.debug('Reprocess dataset: %s' % self.dataset_id)
     self.database \
             .mongodb_dataset \
             .update({'_id': self.dataset['_id']}, \
                     {'$set': {'status': AnnotationServer.STATUS_PROCESSING}})
     self.do_process_dataset()
     return AnnotationServer.response_success(result='Processing dataset')
Example #6
def delete_module(module_path):
    """ Delete Module after it has been processed """
    try:
        del sys.modules[module_path]
    except Exception as exc:
        logger.debug('Could not delete: {}'.format(module_path))
    else:
        logger.debug('Deleted Module: {}'.format(module_path))
Example #7
    def get(self, request, id):
        try:
            snippet = Snippet.objects.get(id=id)
        except Snippet.DoesNotExist:
            logger.warning('snippet not exist|%d', id)
            return ObjectNotExistResponse()

        logger.debug('query snippet|%d', id)
        return SuccessResponse(snippet.to_dict())
Example #8
def _bookable_offers(query):
    beginning_date_is_in_the_future = (Stock.beginningDatetime > datetime.utcnow())
    no_beginning_date = Stock.beginningDatetime == None  # SQLAlchemy overloads ==, so this builds an IS NULL clause
    query = query.filter(beginning_date_is_in_the_future | no_beginning_date)
    logger.debug(lambda: '(reco) offers in the future .count ' + str(query.count()))

    query = _filter_bookable_offers_for_discovery(query)
    logger.debug(lambda: '(reco) bookable .count ' + str(query.count()))
    return query
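The `lambda:` arguments above only make sense with a logger that evaluates callables lazily, so the expensive `query.count()` runs only when DEBUG logging is enabled; a minimal sketch of such a wrapper (an assumption for illustration, not the project's actual logger):

import logging

class LazyLogger:
    """Evaluate callable log arguments only when the level is enabled."""

    def __init__(self, name):
        self._logger = logging.getLogger(name)

    def debug(self, msg):
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug(msg() if callable(msg) else msg)

logger = LazyLogger(__name__)
logger.debug(lambda: '(reco) offers in the future .count ' + str(123))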
Example #9
 def dump(cls):
     if not cls.no_dump_mode:
         if logger is not None:
             logger.debug('Dumping save to disk')
         p = xdrlib.Packer()
         p.pack_array(cls.data, p.pack_int)
         b = p.get_buffer()
         with open(cls.path, 'wb') as file:
             file.write(b)
Example #10
 def __send_order(self, socket, order):
     socket.send(order.to_json())
     ret = socket.recv()
     data = json.loads(ret.decode('utf-8'))
     if data['code'] != 0:
         logger.debug(order.to_json())
         logger.debug(data['msg'])
Example #11
def get_thing_type_and_extra_data_from_titelive_type(titelive_type):
    if titelive_type == 'A':
        return None, None
    elif titelive_type == 'BD':
        return str(ThingType.LIVRE_EDITION), BookFormat.BANDE_DESSINEE.value
    elif titelive_type == 'BL':
        return str(ThingType.LIVRE_EDITION), BookFormat.BEAUX_LIVRES.value
    elif titelive_type == 'C':
        return None, None
    elif titelive_type == 'CA':
        return None, None
    elif titelive_type == 'CB':
        return None, None
    elif titelive_type == 'CD':
        return None, None
    elif titelive_type == 'CL':
        return None, None
    elif titelive_type == 'DV':
        return None, None
    elif titelive_type == 'EB':
        return None, None
    elif titelive_type == 'K7':
        return None, None
    elif titelive_type == 'LA':
        return None, None
    elif titelive_type == 'LC':
        return str(ThingType.LIVRE_EDITION), BookFormat.LIVRE_CASSETTE.value
    elif titelive_type == 'LD':
        return str(ThingType.LIVRE_EDITION), BookFormat.LIVRE_AUDIO.value
    elif titelive_type == 'LE':
        return None, None
    elif titelive_type == 'LR':
        return None, None
    elif titelive_type == 'LT':
        return None, None
    elif titelive_type == 'LV':
        return None, None
    elif titelive_type == 'M':
        return str(ThingType.LIVRE_EDITION), BookFormat.MOYEN_FORMAT.value
    elif titelive_type == 'O':
        return None, None
    elif titelive_type == 'P':
        return str(ThingType.LIVRE_EDITION), BookFormat.POCHE.value
    elif titelive_type == 'PC':
        return None, None
    elif titelive_type == 'PS':
        return None, None
    elif titelive_type == 'R':
        return str(ThingType.LIVRE_EDITION), BookFormat.REVUE.value
    elif titelive_type == 'T' \
            or titelive_type == 'TL':
        return str(ThingType.LIVRE_EDITION), None
    elif titelive_type == 'TR':
        return None, None
    else:
        logger.debug(" WARNING: Unknown titelive_type: " + titelive_type)
        return None, None
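The chain above can be collapsed into a lookup table; a behaviour-preserving sketch using the same ThingType/BookFormat names, with every code that maps to (None, None) grouped into one set:

_BOOK_FORMAT_BY_TITELIVE_TYPE = {
    'BD': BookFormat.BANDE_DESSINEE,
    'BL': BookFormat.BEAUX_LIVRES,
    'LC': BookFormat.LIVRE_CASSETTE,
    'LD': BookFormat.LIVRE_AUDIO,
    'M': BookFormat.MOYEN_FORMAT,
    'P': BookFormat.POCHE,
    'R': BookFormat.REVUE,
}
_KNOWN_NON_BOOK_TYPES = {'A', 'C', 'CA', 'CB', 'CD', 'CL', 'DV', 'EB', 'K7',
                         'LA', 'LE', 'LR', 'LT', 'LV', 'O', 'PC', 'PS', 'TR'}

def get_thing_type_and_extra_data_from_titelive_type(titelive_type):
    if titelive_type in _BOOK_FORMAT_BY_TITELIVE_TYPE:
        return str(ThingType.LIVRE_EDITION), _BOOK_FORMAT_BY_TITELIVE_TYPE[titelive_type].value
    if titelive_type in ('T', 'TL'):
        return str(ThingType.LIVRE_EDITION), None
    if titelive_type not in _KNOWN_NON_BOOK_TYPES:
        logger.debug("WARNING: Unknown titelive_type: " + titelive_type)
    return None, None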
Example #12
def manager_meta_job(group_id):
    """
    添加, 删除, 修改job
    该方法中使用了scheduler, 不能被序列化, 只能放到该类中
    """
    logger.debug("starting manager_meta_job...")
    subject_dao = SubjectManualInfo()
    jobs = subject_dao.read_jobs(group_id)

    # Iterate over the jobs and process each one
    for job in jobs:
        process_status = job['process_status']
        group_id = job['group_id']
        subject_id = job['subject_id']
        subject_name = job['subject_name']
        monitor_type = job['monitor_type']
        begin_time = job['begin_time']
        end_time = job['end_time']
        exec_type = job['exec_type']
        exec_week = job['exec_week']
        exec_time = job['exec_time']
        cluster_type = job['cluster_type']
        logger.debug('{subject_id: %s, process_status: %s}.' % (subject_id, process_status))

        job_id = str(subject_id)
        if process_status == 3:  # delete job
            try:
                scheduler.remove_job(job_id)
            except Exception:
                logger.error('remove failed, job does not exist. {job_id: %s}' % (job_id,))

        if process_status == 1 or process_status == 2:  # add job
            args = (group_id, subject_id, subject_name, monitor_type, begin_time, end_time, exec_time, cluster_type)
            if exec_type == 0:  # run once
                args += ('cluster_result', )
                run_date = datetime.strptime(exec_time, '%Y-%m-%d %H:%M')
                scheduler.add_job(cluster_job, trigger='date', run_date=run_date,
                                  id=job_id, args=args, replace_existing=True)

            elif exec_type == 1:  # daily
                args += ('cluster_result', )
                dt = datetime.strptime(exec_time, '%H:%M')
                scheduler.add_job(cluster_job, trigger='cron', hour=dt.hour, minute=dt.minute,
                                  id=job_id, args=args, replace_existing=True)

            elif exec_type == 2:  # weekly
                args += ('cluster_result_week', )
                dt = datetime.strptime(exec_time, '%H:%M')
                scheduler.add_job(cluster_job, trigger='cron', hour=dt.hour, minute=dt.minute,
                                  day_of_week=exec_week,
                                  id=job_id, args=args, replace_existing=True)

            elif exec_type == 3:  # monthly
                args += ('cluster_result_month', )
                dt = datetime.strptime(exec_time, '%d %H:%M')
                scheduler.add_job(cluster_job, trigger='cron', day=dt.day, hour=dt.hour, minute=dt.minute,
                                  id=job_id, args=args, replace_existing=True)
Example #13
 def register_api(self):
     logger.debug(self.request.json)
     face = self.request.json.get('face')
     client_id = self.request.json.get('client_socket_id')
     send_at = self.request.json.get('sent_at')
     logger.debug('POST /register name: %s, client: %s, timestamp: %s' %
                  (face, client_id, send_at))
     self.register_queue.put((face, client_id, send_at))
     return DemoRegisterServer.response_success('Registering')
Example #14
    def _create_voting_table(self):
        """Filling table with assignment scores.

        Create table which represents paired label assignments, i.e. each
        cell comprises score for corresponding label assignment"""
        size = max(len(np.unique(self.gt_labels_subset)),
                   len(np.unique(self.predicted_labels)))
        self._voting_table = np.zeros((size, size))

        for idx_gt, gt_label in enumerate(np.unique(self.gt_labels_subset)):
            self._gt_label2index[gt_label] = idx_gt
            self._gt_index2label[idx_gt] = gt_label

        if len(self._gt_label2index) < size:
            for idx_gt in range(len(np.unique(self.gt_labels_subset)), size):
                gt_label = idx_gt
                while gt_label in self._gt_label2index:
                    gt_label += 1
                self._gt_label2index[gt_label] = idx_gt
                self._gt_index2label[idx_gt] = gt_label

        for idx_pr, pr_label in enumerate(np.unique(self.predicted_labels)):
            self._pr_label2index[pr_label] = idx_pr
            self._pr_index2label[idx_pr] = pr_label

        if len(self._pr_label2index) < size:
            for idx_pr in range(len(np.unique(self.predicted_labels)), size):
                pr_label = idx_pr
                while pr_label in self._pr_label2index:
                    pr_label += 1
                self._pr_label2index[pr_label] = idx_pr
                self._pr_index2label[idx_pr] = pr_label

        for idx_gt, gt_label in enumerate(np.unique(self.gt_labels_subset)):
            if gt_label in list(self.exclude.keys()):
                continue
            gt_mask = self.gt_labels_subset == gt_label
            for idx_pr, pr_label in enumerate(np.unique(
                    self.predicted_labels)):
                if pr_label in list(self.exclude.values()):
                    continue
                self._voting_table[idx_gt, idx_pr] = \
                    np.sum(self.predicted_labels[gt_mask] == pr_label, dtype=float)
        for key, val in self.exclude.items():
            # works only if one pair in exclude
            assert len(self.exclude) == 1
            try:
                self._voting_table[
                    self._gt_label2index[key],
                    self._pr_label2index[val[0]]] = size * np.max(
                        self._voting_table)
            except KeyError:
                logger.debug('No background!')
                self._voting_table[self._gt_label2index[key],
                                   -1] = size * np.max(self._voting_table)
                self._pr_index2label[size - 1] = val[0]
                self._pr_label2index[val[0]] = size - 1
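A voting table like this is typically handed to a Hungarian matcher to pick the best one-to-one label assignment; a small sketch of that follow-up step (an assumed downstream use, not shown in the method above):

import numpy as np
from scipy.optimize import linear_sum_assignment

def best_label_assignment(voting_table):
    # linear_sum_assignment minimizes cost, so negate the scores to maximize them
    gt_idx, pr_idx = linear_sum_assignment(-np.asarray(voting_table, dtype=float))
    return dict(zip(gt_idx, pr_idx))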
Example #15
 def add_altitude_callback(self, callback: Callable):
     logger.debug('Altitude callback')
     if self.vessel_name != '?':
         altitude_stream = self.client.add_stream(
             getattr, self.client.space_center.active_vessel.flight(),
             'mean_altitude')
         altitude_stream.rate = 10
         altitude_stream.add_callback(callback)
         altitude_stream.start()
Example #16
 def get_phone_number_by_user_name(self, user_name):
     # TODO: Mocked because it's difficult to pull this from slack
     # TODO: Update test if we pull this from slack later
     team = Team()
     for member in team.members:
         logger.debug(member)
         logger.debug("{0}, {1}".format(member["user_name"], user_name))
         if member["user_name"] == user_name:
             return member["name"], member["phone_number"]
Example #17
 def channel_from_phone_number(self, phone_number):
     """
     Fetches a clients channel (i.e. itemId) from Unomi based on phone number
     """
     profile = self.profile_search(phone_number)
     logger.debug("PROFILE: {0}".format(profile))
     if profile:
         return profile["itemId"]
     else:
         return channel_token(phone_number)
Example #18
 def pages(self) -> int:
     logger.debug(
         f"Find total pages using `{BooksPageLocators.TOTAL_PAGES}`")
     total_pages = self.soup.select_one(
         BooksPageLocators.TOTAL_PAGES).string
     pattern = r"Page [0-9]+ of ([0-9]+)"
     match = re.search(pattern, total_pages)
     page_count = int(match.group(1))
     logger.info(f"Total number of pages: `{page_count}`.")
     return page_count
Example #19
    def disable(self):
        """Assign diasble status E (boolean) on the selected channel.
        
        :return: True if successful or False otherwise.
        """

        self.select()
        logger.debug("Disabled Device : {} , Channel : {}".format(
            self.device, self.id))
        return bitlib.BL_Enable(0)
Example #20
 def find_nearest_api(self):
     image_id = self.request.json.get('image_id')
     logger.debug('FindNearest, image_id: %s' % image_id)
     try:
         result = self.database.nearest_image_ids_for_image_id(image_id)
         logger.debug(result)
         return self.response_success(result=result)
     except Exception as e:
         logger.error(e, exc_info=True)
         return self.response_error('Find nearest failed')
Example #21
 def price(self) -> (str, float):
     logger.debug("Finding book price...")
     locator = BookLocators.PRICE
     elem = self.parent.select_one(locator).string
     pattern = r"(.)([0-9]+\.[0-9]{2})"
     match = re.search(pattern, elem)
     Price = namedtuple("Price", ["currency", "number"])
     price_instance = Price(match.group(1), float(match.group(2)))
     logger.debug(f"Price found: `{price_instance.currency}`, `{price_instance.number}`.")
     return price_instance
    def __init__(self, server):
        self.server = server
        self.inactive_time_start = datetime.datetime.now()
        self.inactive_time = 0
        self.inactive_timer = False
        self.time_interval = 6
        self.last_wave = 0

        threading.Thread.__init__(self)
        logger.debug("Mapper for " + server.name + " initialised")
Example #23
 def request_task(peers, url, data):
     for peer in peers:
         try:
             requests.post(get_peer_url(peer) + url,
                           data=data,
                           timeout=(5, 1))
         except Exception as e:
             logger.debug(
                 "Server: Requests: Error while sending data in process" +
                 str(peer))
Example #24
def department_or_national_offers(query, departement_codes):
    if '00' in departement_codes:
        return query

    query = query.filter(
        Venue.departementCode.in_(departement_codes) | (Offer.isNational == True)
    )

    logger.debug(lambda: '(reco) departement .count ' + str(query.count()))
    return query
 def read_contract_output(self, contract_address: str) -> Optional[str]:
     if contract_address.lower() == 'self':
         contract_address = Wallet.gen_public_key(int(self.current_contract_priv_key))
     if not is_valid_contract_address(contract_address):
         raise Exception(f"Contract Address {contract_address} is invalid contract address")
     _, co, _ = get_cc_co_cp_by_contract_address(contract_address)
     co = str(co)
     co = co.replace("'", "\\'") if co != '' else None
     logger.debug(f"Read output of contract {contract_address}: {co}")
     return co
Example #26
    def __init__(self, name):
        self.sqlite_db_file = name + "_db" + ".sqlite"

        if not path.exists(self.sqlite_db_file):
            self.build_schema()
        self.conn = sqlite3.connect(self.sqlite_db_file,
                                    check_same_thread=False)
        self.cur = self.conn.cursor()

        logger.debug("Database for " + name + " initialised")
Example #27
def mining_thread_task():
    while True:
        if not miner.is_mining() and not consts.NO_MINING:
            try:
                miner.start_mining(BLOCKCHAIN.mempool, BLOCKCHAIN.active_chain,
                                   MY_WALLET)
            except Exception as e:
                miner.stop_mining()
                logger.debug(f"Miner: Error while mining: {type(e)}({str(e)})")
        time.sleep(consts.MINING_INTERVAL_THRESHOLD // 2)
Example #28
 def send(self, params=None, data=None, **kwargs):
     response = self.session.request(method=self.method,
                                     url=self.url,
                                     params=params,
                                     data=data,
                                     **kwargs)
     response.encoding = 'utf-8'
     logger.debug('{0} {1}'.format(self.method, self.url))
     logger.debug('Request succeeded: {0}\n{1}'.format(response, response.text))
     return response
Example #29
def execute_cleaners(df):
    cleaners = dynamic_loading.objects_from_modules(DATA_CLEANERS_DIR, 'clean')
    rows_count = len(df)
    for clean_func in cleaners:
        df = clean_func(df)
        logger.debug('Row count is {} '
                     '({} Cleaned {} rows)'.format(len(df),
                                                   clean_func.__module__,
                                                   rows_count - len(df)))
        rows_count = len(df)
    return df
    def train(self, counter=1, gen_dirs=()):
        if self.conf.need_to_load:
            self.load(self.conf.checkpoint_dir, step=counter)

        data = self.data
        logger.info('Total amount of images: %s' % len(data))
        # np.random.shuffle(data)

        tf.initialize_all_variables().run()

        # counter = 1
        start_time = time.time()
        batch_idxs = min(len(data), self.conf.train_size) / self.conf.batch_size

        stego_accuracy = 0

        accuracies = []
        accuracies_steps = []

        logger.debug('Starting updating')
        for epoch in range(self.conf.epoch):
            losses = []

            np.random.shuffle(data)

            logger.info('Starting epoch %s' % epoch)

            for idx in range(0, int(batch_idxs)):
                batch_files = data[idx * self.conf.batch_size:(idx + 1) * self.conf.batch_size]
                batch = [get_image(batch_file, self.conf.image_size)
                         for batch_file in batch_files]
                batch_images = np.array(batch).astype(np.float32)

                batch_targets = self.get_targets(batch_files)

                self.sess.run(self.optimize, feed_dict={self.images: batch_images, self.target: batch_targets})
                loss = self.loss.eval({self.images: batch_images, self.target: batch_targets})

                losses.append(loss)

                # logger.debug("[ITERATION] Epoch [%2d], iteration [%4d/%4d] time: %4.4f, Loss: %8f, accuracy: %8f" %
                #              (epoch, idx, batch_idxs, time.time() - start_time, loss, stego_accuracy))

                counter += 1

                if counter % 300 == 0:
                    logger.info('------')

                    stego_accuracy = self.accuracy(n_files=-1, test_dir=self.test_dir)
                    logger.info('[TEST] Epoch {:2d} accuracy: {:3.1f}%'.format(epoch + 1, 100 * stego_accuracy))

                    for gen_dir in gen_dirs:
                        gen_accuracy = self.accuracy(n_files=-1, test_dir=gen_dir)
                        logger.info('[GEN_TEST] Folder {}, accuracy: {:3.1f}%'.format(gen_dir, 100 * gen_accuracy))
Example #31
def update_domains(update_all=False):
    logger.debug('start to update domains')
    try:
        update_butian_src_domains(update_all)
    except RequestException:
        logger.error('update failed with bad network, please retry')
    except JSONDecodeError:
        logger.error('update failed with json decode error, please retry')
    except Exception:
        logger.error('unexpected error occurred, please retry')
    logger.debug('finish update domains')
Example #32
 def action_record(self):
     if self.player.record_position:
         logger.debug(
             'stop recording player position, it will be saved to "records.npy"'
         )
         self.player.stop_recording_position()
         np.save('records.npy',
                 self.player.position_records[:self.player.last_index])
     else:
         logger.debug('start recording player position')
         self.player.start_recording_position()
Example #33
def getAppData(key):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    response = AppDataCore.getAppDataForKey(key)

    if response is None:
        error = "Invalid data requested"

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #34
def getProfileInfo(profileName):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    appData = AppDataCore.getAppData()
    response = appData["profile"].get(profileName, None)

    if response is None:
        error = "Profile %s does not exist." % (profileName)

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #35
def readfile():
    logger.info("Hitting URL %s", request.url)

    rawContent = None
    filePath = request.args.get("file_path")
    error, response = FileIO.readFile(filePath)

    if error:
        response = None

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #36
    def _validateInput(self, inputParams):

        if not util.isEmail(inputParams.get('username')):
            logger.debug('Invalid email address:' + inputParams.get('username'))
            self._sendError(self.USERNAME_ERRORCODE, 'Invalid email address')
            return False

        if not util.isPassword(inputParams.get('password')):
            logger.debug('Invalid password:' + inputParams.get('password'))
            self._sendError(self.PASSWORD_ERRORCODE, 'Invalid password')
            return False

        return True
    def load(self, checkpoint_dir, step):
        model_dir = "%s_%s" % (self.conf.model_name, self.conf.batch_size)
        checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
        try:
            ckpt_name = "%s_%s.ckpt-%s" % (self.conf.model_name, step, step)

            logger.info("[LOADING] step: %s, name: %s" % (step, ckpt_name))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
        except Exception as e:
            logger.debug(e)
            ckpt_name = "StegoDCGAN-%s" % (step)

            logger.info("[LOADING] step: %s, name: %s" % (step, ckpt_name))
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
Example #38
def register_dev(imei, platform, dev_type):
    string = imei + platform + dev_type
    did = uuid.uuid3(uuid.NAMESPACE_DNS, str(string)).hex
    logger.debug('register device info:')
    logger.debug('\timei: %s, platform: %s, dev_type: %s, did: %s' % (imei, platform, dev_type, did))
    try:
        with db_session:
            dev = Device.get(did=did)
            if not dev:
                mast_secret = uuid.uuid4().hex
                dev = Device(did=did, platform=platform, dev_type=dev_type, mast_secret=mast_secret)
            return SuccessPage(dev.to_dict())
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example #39
def addProfile(profile):
    logger.info("Hitting URL %s", request.url)

    error, response = None, None
    action = request.form.get("action")

    if action == "add":
        error, response = TagCore.addProfile(profile)
    elif action == "delete":
        error, response = TagCore.removeProfile(profile)

    if error:
        response = None

    logger.debug("Error: %s", error)
    logger.debug("Response: %s", response)
    return jsonify(response=response, error=error)
Example #40
    def run(self, stop_on_failure=False):
        fail_cnt = 0
        metadata = None
        getter = self.iter_get()
        try:
            while True:
                try:
                    # Some getters require metadata from the last response to figure out the
                    # pagination of the next one
                    data_str = getter.send(metadata)
                    logger.debug('Got: %s' % data_str)
                    logger.info('Got records')

                    data = self.parse(data_str)
                    logger.debug('Parsed into: %s' % str(data))

                    metadata, records = self.normalize(data)
                    logger.debug('Normalized: \nMetadata: %s \nRecords: %s' % \
                        (str(metadata), str(records)))
                    logger.info('Normalized %d records' % len(records))

                    logger.info('Storing')
                    self.store(metadata, records)
                except Exception as e:
                    if isinstance(e, StopIteration):
                        raise

                    if stop_on_failure or fail_cnt > 100:
                        raise
                    else:
                        fail_cnt += 1
                        sys.stderr.write("Failed: %s" % str(e))
                        continue
        except StopIteration:
            pass
Example #41
def main():
    logger.info('Started.')

    gmail = build('gmail', 'v1', http=make_google())
    gmail_send = gmail.users().messages().send
    email_messages = {}

    with open(CONFIG_FILE, 'r') as file:
        users = json.load(file)
    logger.debug('User loaded')

    for endpoint in ENDPOINTS:
        logger.info('Processing endpoint: %s', endpoint.name)
        endpoint.request()
        if not make_cache(endpoint.name, endpoint._text):
            logger.debug('Content is the same as the cached copy. Skipping.')
            continue
        for user in users:
            logger.debug('Processing user: %s', user['name'])
            if user['name'] in endpoint:
                logger.debug('User in endpoint.')
                message = make_message('match', regexp=user['name'],
                                       name=endpoint.name, url=endpoint.url)
                email_messages.setdefault(user['email'], []).append(message)

    logger.info('Done fetching endpoints. Now drafting email.')

    queue = []
    for recipient, messages in email_messages.items():
        message_text = make_message('frame', matches='\n'.join(messages))
        message = MIMEText(message_text)
        message['to'] = recipient
        # message['from'] = sender
        message['subject'] = MAIL_SUBJECT

        # The byte/str drama, you know.
        raw = base64.b64encode(message.as_string().encode())
        queue.append({'raw': raw.decode()})

    logger.info('%d email(s) have been drafted. Sending.', len(queue))
    consume_message_queue(gmail_send, queue)
    logger.info('Done.')
    def train(self):
        if self.conf.need_to_load:
            self.load(self.conf.checkpoint_dir)

        data = glob(os.path.join(self.conf.data, "*.%s" % self.conf.img_format))
        logger.info('Total amount of images: %s' % len(data))
        # np.random.shuffle(data)

        d_fr_optim = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        d_fr_optim = d_fr_optim.minimize(self.d_fr_loss, var_list=self.d_fr_vars)

        d_s_n_optim = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        d_s_n_optim = d_s_n_optim.minimize(self.d_stego_loss_total, var_list=self.d_s_n_vars)

        g_optim_fake = tf.train.AdamOptimizer(self.conf.learning_rate, beta1=self.conf.beta1)
        g_optim_fake = g_optim_fake.minimize(self.g_loss, var_list=self.g_vars)

        # g_optim_stego = tf.train.AdamOptimizer(0.000005, beta1=0.9)
        # g_optim_stego = g_optim_stego.minimize(self.g_loss_stego, var_list=self.g_vars)

        merged = tf.merge_all_summaries()
        train_writer = tf.train.SummaryWriter('./logs_sgan', self.sess.graph)

        tf.initialize_all_variables().run()

        sample_z = np.random.uniform(-1, 1, size=(self.sample_size, self.z_dim))
        sample_files = data[0:self.sample_size]
        sample = [get_image(sample_file, self.image_size, need_transform=True) for sample_file in sample_files]
        sample_images = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        batch_idxs = min(len(data), self.conf.train_size) / self.conf.batch_size

        logger.debug('Starting updating')
        for epoch in range(self.conf.epoch):
            stego_losses, fake_real_losses, generator_losses = [], [], []

            logger.info('Starting epoch %s' % epoch)

            for idx in range(0, int(batch_idxs)):
                batch_files = data[idx * self.conf.batch_size:(idx + 1) * self.conf.batch_size]
                batch = [get_image(batch_file, self.image_size, need_transform=True) for batch_file in batch_files]
                batch_images = np.array(batch).astype(np.float32)

                batch_z = np.random.uniform(-1, 1, [self.conf.batch_size, self.z_dim]).astype(np.float32)

                self.sess.run(d_fr_optim, feed_dict={self.images: batch_images, self.z: batch_z})
                self.sess.run(d_s_n_optim, feed_dict={self.images: batch_images, self.z: batch_z})

                self.sess.run(g_optim_fake, feed_dict={self.z: batch_z})
                self.sess.run(g_optim_fake, feed_dict={self.z: batch_z})

                # # if epoch > 5:
                # self.sess.run(g_optim_stego, feed_dict={self.z: batch_z})

                # errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                # errD_real = self.d_loss_real.eval({self.images: batch_images})
                #
                # errD_stego = self.d_loss_stego.eval({self.z: batch_z})
                # errD_n_stego = self.d_loss_nonstego.eval({self.z: batch_z})
                #
                # errG = self.g_loss.eval({self.z: batch_z})
                #
                # fake_real_losses.append(errD_fake + errD_stego)
                # stego_losses.append(errD_stego + errD_n_stego)
                # generator_losses.append(errG)
                #
                logger.debug("[ITERATION] Epoch [%2d], iteration [%4d/%4d] time: %4.4f" %
                             (epoch, idx, batch_idxs, time.time() - start_time))
                # logger.debug('[LOSS] Real/Fake: %.8f' % (errD_fake + errD_real))
                # logger.debug('[LOSS] Stego/Non-Stego: %.8f' % (errD_stego + errD_n_stego))
                # logger.debug('[LOSS] Generator: %.8f' % errG)

                counter += 1

                if np.mod(counter, 1000) == 0:
                    self.save(self.conf.checkpoint_dir, counter)

                if np.mod(counter, 300) == 0:
                    logger.info('Save samples')
                    samples, d_loss, g_loss = self.sess.run(
                        [self.sampler, self.d_fr_loss, self.g_loss_fake,
                         ],
                        feed_dict={self.z: sample_z, self.images: sample_images}
                    )
                    save_images_to_one(samples, [8, 8], './samples/train_%s_%s.png' % (epoch, idx))