def save_cache(self, filename, data, mtime, expire_time=utils.CACHE_KEEP_TIME, unzip=False):
        if not filename or not data or mtime is None:
            return
        db = sqlite3.connect(utils.DSM_CACHE_PATH)
        cursor = db.cursor()

        if expire_time != 0:
            expire = expire_time + time()
        else:
            expire = 0

        try:
            if unzip:
                pdata = pickle.dumps(data)
            else:
                pdata = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)

            sql = "REPLACE INTO {} (name_hash,add_time, expire,add_data) VALUES (?,?,?,?)".format(self.table_name)
            cursor.execute(sql, (self.__url_md5(filename), mtime, expire, pdata))
            db.commit()
            utils.add_log(self.logger, 'info', 'save_cache', filename, mtime, expire)
        except Exception as e:
            utils.add_log(self.logger, 'error', 'save_cache', filename, mtime, expire, e)
        finally:
            cursor.close()
            db.close()
Example No. 2
def iter_results_pages(region, start_page, num_iter, user_agent):
    try:
        cookies, p_auth = request_home(user_agent=user_agent)
        results, total_num_pages = request_results(p_auth=p_auth,
                                                   cookies=cookies,
                                                   region=region,
                                                   user_agent=user_agent)
    except Exception as e:
        utils.add_log(region, e, is_exception=True)
        return False

    if start_page == 1:
        sleep(np.random.random() * (MAX_SLEEP - MIN_SLEEP) + MIN_SLEEP)
        save_results(results=results, region=region, page=1)

    for page in range(max(2, start_page), start_page + num_iter):
        if page > total_num_pages:
            return False
        sleep(np.random.random() * (MAX_SLEEP - MIN_SLEEP) + MIN_SLEEP)
        try:
            results = request_results_page(page=page,
                                           cookies=cookies,
                                           user_agent=user_agent)
            save_results(results=results, region=region, page=page)
        except Exception as e:
            utils.add_log(region, e, is_exception=True)

    return True
Example No. 3
def get_campaigns(list_id):
    config = frappe.get_single("MailChimp Settings")

    if not config.host or not config.api_key:
        frappe.throw(
            _("No configuration found. Please make sure that there is a MailChimpConnector configuration"
              ))

    if config.verify_ssl != 1:
        verify_ssl = False
    else:
        verify_ssl = True

    url = "{0}/campaigns?fields=campaigns.id,campaigns.status,campaigns.settings.title".format(
        config.host, list_id)
    raw = execute(url, config.api_key, None, verify_ssl, method="GET")
    results = json.loads(raw)
    for campaign in results['campaigns']:
        try:
            erp_campaign = frappe.get_doc("Campaign",
                                          campaign['settings']['title'])
            # update if applicable

        except:
            # ERP does not know this campaign yet, create it
            new_campaign = frappe.get_doc({'doctype': 'Campaign'})
            new_campaign.campaign_name = campaign['settings']['title']
            new_campaign.insert()

    add_log(
        title=_("Sync complete"),
        message=(_("Sync of campaigns from {0} completed.")).format(list_id),
        topic="MailChimp")
    return {'campaigns': results['campaigns']}
Example No. 4
    def del_expire(self):

        db = sqlite3.connect(utils.DSM_CACHE_PATH)
        cursor = db.cursor()

        tbs = ['spider_cache', 'dsm_cache']
        try:
            for tb_name in tbs:
                cursor.execute('SELECT * FROM {}'.format(tb_name))

                rss = cursor.fetchall()

                for rs in rss:
                    name, ntime, expire, data = rs
                    if expire > 0 and time() > expire:
                        cursor.execute('DELETE FROM {} WHERE name_hash=?'.format(tb_name), (name,))
                        utils.add_log(logging.getLogger('cache'), 'info', 'deleting expired cache entry in del_expire:', tb_name, '-', name)

                db.commit()
        except Exception as e:

            utils.add_log(logging.getLogger('cache'), 'error', 'del_expire:', e)
        finally:
            cursor.close()
            db.close()
Example No. 5
def request_url(url,
                type=None,
                params=None,
                headers=None,
                auth=None,
                files=None,
                setime=None):
    time = config.timeout
    if setime:
        time = setime
    try:
        data = requests.get(url,
                            params=params,
                            headers=headers,
                            auth=auth,
                            files=files,
                            timeout=time)
    except Exception as error:
        utils.add_log(
            str(error) + '\nURL: ' + str(url), 'Request-Except', True)
        return False

    if data.status_code == 200:
        return data
    else:
        utils.add_log(
            'Error in request! {}\n{}\n\n{}'.format(url, params, data.text),
            'Request', True)
    return False
Example No. 6
    def start_thread(self, port):
        old_path = os.getcwd()
        if not os.path.exists(utils.CACHE_PATH):
            os.mkdir(utils.CACHE_PATH)

        web_dir = utils.CACHE_PATH
        os.chdir(web_dir)

        self.httpd = HTTPServer(("", port), SimpleHTTPRequestHandler)
        utils.add_log(self.logger, 'info', "serving at port {0}".format(port))
        self.httpd.serve_forever()

        os.chdir(old_path)
Example No. 7
def enqueue_get_campaigns(list_id):
    add_log(title=_("Starting sync"),
            message=(_("Starting to sync campaigns from {0}")).format(list_id),
            topic="MailChimp")

    kwargs = {'list_id': list_id}
    enqueue("lifefair.lifefair.mailchimp.get_campaigns",
            queue='long',
            timeout=15000,
            **kwargs)
    frappe.msgprint(
        _("Queued for syncing. It may take a few minutes to an hour."))
    return
Example No. 8
def crawl_region(region, start_page):
    utils.add_log(region, "-" * 50)
    should_continue = True
    while should_continue:
        should_continue = iter_results_pages(
            region=region,
            start_page=start_page,
            num_iter=NUM_REQUESTS_BEFORE_PAUSE,
            user_agent=utils.random_user_agent())
        if should_continue:
            start_page += NUM_REQUESTS_BEFORE_PAUSE
            sleep(PAUSE_SLEEP)

    save_concatenated_excel(region)
Example No. 9
def cron_sync_eqsl(dry_run=False):
    """ https://www.eqsl.cc/qslcard/ImportADIF.txt """

    if dry_run:
        print("--- [DRY RUN] Sending logs to eQSL when requested")
    else:
        print("--- Sending logs to eQSL when requested")
    logs = Log.query.filter(Log.eqsl_qsl_sent == "R").all()
    config = Config.query.first()
    if not config:
        print("!!! Error: config not found")
        add_log(category="CONFIG", level="ERROR", message="Config not found")
        return

    for log in logs:
        status = eqsl_upload_log(log, config, dry_run)
        if dry_run:
            continue
        err = UserLogging()
        err.user_id = log.user.id
        err.log_id = log.id
        err.logbook_id = log.logbook.id
        err.category = "EQSL"

        if status["state"] == "error":
            err.level = "ERROR"
            print("!! Error uploading QSO {0} to eQSL: {1}".format(
                log.id, status["message"]))
        elif status["state"] == "rejected":
            log.eqsl_qsl_sent = "I"
            print("!! Rejected uploading QSO {0} to eQSL: {1}".format(
                log.id, status["message"]))
        else:
            err.level = "INFO"

        err.message = status["message"] + "\r\n"

        if "msgs" in status:
            for i in status["msgs"]:
                print("!! {0}: {1}".format(i[0], i[1]))
                err.message += "{0}: {1}\r\n".format(i[0], i[1])

        if status["state"] == "success":
            log.eqsl_qsl_sent = "Y"

        print(status)

        db.session.add(err)
        db.session.commit()
Example No. 10
def save_concatenated_excel(region):
    try:
        filenames = sorted(glob(os.path.join(RESULTS_DIR, region, "*")),
                           key=lambda x: int(re.findall(r"\d+", x)[0]))
        dataset = pd.concat(
            (pd.read_csv(f, sep="\t", index_col=0) for f in filenames),
            ignore_index=True)
        dataset.to_excel(os.path.join(RESULTS_DIR, region + ".xls"),
                         region,
                         index=False,
                         encoding="utf-8")
        utils.add_log(region=region,
                      text="\nREGION DONE\nSaved %s" %
                      os.path.join(RESULTS_DIR, region + ".xls"))
    except Exception as e:
        utils.add_log(region, e, is_exception=True)
Example No. 11
def enqueue_sync_contacts(list_id, type="Alle", meeting=None, owner=None):
    add_log(title=_("Starting sync"),
            message=(_("Starting to sync contacts to {0}")).format(list_id),
            topic="MailChimp")

    kwargs = {
        'list_id': list_id,
        'type': type,
        'meeting': meeting,
        'owner': owner
    }
    enqueue("lifefair.lifefair.mailchimp.sync_contacts",
            queue='long',
            timeout=15000,
            **kwargs)
    frappe.msgprint(
        _("Queued for syncing. It may take a few minutes to an hour."))
    return
Example No. 12
    def start_train(self, model, session, summary_op, summary_writer,
                    checkpoint_prefix):
        for epoch in range(FLAGS.num_epochs):
            for batch in self.iter_batch(self.train_set, True):
                feed_dict = self.create_feed_dict(model, batch, True)
                step, summaries, loss, accuracy = model.run_step(
                    session, True, feed_dict, summary_op)
                summary_writer.add_summary(summaries, step)

                if step % FLAGS.steps_check == 0:
                    log_msg = 'epoch: {}/{}, step: {}, acc: {:g}, loss: {:g}'.format(
                        epoch + 1, FLAGS.num_epochs, step, accuracy, loss)
                    process_info(log_msg)

            if (epoch + 1) % FLAGS.save_epoch == 0:
                path = save_model(session, model, checkpoint_prefix, epoch)
                log_msg = 'epoch {} is over. Saved model checkpoint to {}\n'.format(
                    epoch + 1, path)
                add_log(log_msg)
Example No. 13
def post_books():
    asin = request.args.get('asin')
    title = request.args.get('title')
    price = request.args.get('price')
    category = request.args.get('category')

    db = client['meta']
    collection = db['newmetadata']
    collection1 = db['logs']  # Collection for logs -- create one in EC2 instance

    v = collection.find_one({"asin": asin})
    try:
        if asin == v["asin"]:
            response_code = 404
            add_log(
                request.method, request.url, {
                    "book_information": {
                        "title": title,
                        "price": price,
                        "category": category
                    }
                }, response_code, collection1)
            return {
                'message': 'Book exists. Please select another one!',
                'data': {}
            }, 404
    except:
        # add into metadata collection
        post_id = collection.insert_one(request.args.to_dict())
        vv = collection.find_one({"asin": asin})
        response_code = 201
        # add into logs collection
        add_log(
            request.method, request.url, {
                "book_information": {
                    "title": title,
                    "price": price,
                    "category": category
                }
            }, response_code, collection1)
        return {'message': 'Book added', 'data': str(vv)}, 201
Example No. 14
def main(_):
    ensure_dir(FLAGS.models_dir)

    if FLAGS.is_train and FLAGS.clean:
        clean_model()

    init_logger(os.path.join(FLAGS.models_dir, FLAGS.log_file))

    if FLAGS.preprocess:
        preprocess()

    data_manager = DataManager()

    out_dir = os.path.join('.', FLAGS.models_dir)
    checkpoint_dir = os.path.join(out_dir, FLAGS.ckpt_path)
    checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
    ensure_dir(checkpoint_dir)

    gpu_options = tf.GPUOptions(allow_growth=True)
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement,
        gpu_options=gpu_options)

    with tf.Session(config=session_conf) as sess:
        wordembedding = data_manager.get_wordembedding()
        model = create_model(sess, Model, FLAGS.is_train, FLAGS.pretrain_model,
                             wordembedding)
        model_engine = Engine(data_manager)

        if FLAGS.is_train:
            add_log('\nstart training...\n')
            summary_op, summary_writer = summary(out_dir, sess, model)
            model_engine.start_train(model, sess, summary_op, summary_writer,
                                     checkpoint_prefix)
            #model_engine.start_train(model, sess, summary_op, summary_writer)

        else:
            add_log('\nstart evaluating...\n')
            #reload_model(sess, model, checkpoint_dir)
            model_engine.start_test(model, sess, checkpoint_prefix)
Example No. 15
def get_dxcc_from_clublog(callsign):
    config = Config.query.first()
    if not config:
        print("!!! Error: config not found")
        add_log(category="CONFIG", level="ERROR", message="Config not found")
        return

    clublog_api_key = config.clublog_api_key
    clublog_uri = "https://secure.clublog.org/dxcc?call={0}&api={1}&full=1".format(
        callsign, clublog_api_key)

    try:
        r = requests.get(clublog_uri)
    except:  # noqa: E722
        raise InvalidUsage("Error getting DXCC from ClubLog", status_code=500)

    if r.status_code != 200:
        raise InvalidUsage("Error getting DXCC from ClubLog",
                           status_code=r.status_code)

    return json.loads(r.content)
Example No. 16
    def get_cache(self, filename, mtime=0):
        if not filename or mtime is None:
            return
        db = sqlite3.connect(utils.DSM_CACHE_PATH)
        cursor = db.cursor()
        try:
            cursor.execute('SELECT * FROM {} WHERE name_hash=?'.format(self.table_name), (self.__url_md5(filename),))

            rs = cursor.fetchone()
            if rs:
                name, ntime, expire, data = rs
                now = time()
                if not name or ntime is None or not data:
                    return

                if (mtime > 0 and mtime == ntime) or (mtime == 0 and expire == 0) or (expire > 0 and now <= expire):
                    utils.add_log(self.logger, 'info', 'get_cache:', filename, mtime, expire)
                    return pickle.loads(data)

                utils.add_log(self.logger, 'info', 'deleting expired cache entry:', filename, mtime, expire)
                cursor.execute('DELETE FROM {} WHERE name_hash=?'.format(self.table_name),
                               (self.__url_md5(filename),))
                db.commit()
                return None

        except Exception as e:
            utils.add_log(self.logger, 'error', 'get_cache:', e)
        finally:
            cursor.close()
            db.close()
Example No. 17
def delete_record():
    db = client['meta']
    collection = db['newmetadata']
    collection1 = db['logs']

    asin1 = request.args.get('asin')
    v = collection.find_one({"asin": asin1})
    try:
        if asin1 == v["asin"]:
            collection.remove({"asin": asin1})
            response_code = 201
            add_log(request.method, request.url, {"book deleted": str(v)},
                    response_code, collection1)
            return {'message': 'Deleted metadata of book', 'data': str(v)}, 201
    except:
        response_code = 404
        add_log(request.method, request.url,
                {"book does not exist so not deleted": asin1}, response_code,
                collection1)
        return {
            'message': 'Book does not exist so cannot delete metadata',
            'data': {}
        }, 404
Example No. 18
def preparation(args):
    # set model save path
    if not args.exp_id:
        args.exp_id = str(int(time.time()))
    args.current_save_path = os.path.join(args.dump_path, args.exp_name,
                                          args.exp_id)
    if not os.path.exists(args.current_save_path):
        os.makedirs(args.current_save_path, exist_ok=True)
    local_time = time.localtime()
    args.log_file = os.path.join(
        args.current_save_path,
        time.strftime("log_%Y_%m_%d_%H_%M_%S.txt", local_time))
    args.output_file = os.path.join(
        args.current_save_path,
        time.strftime("output_%Y_%m_%d_%H_%M_%S.txt", local_time))

    add_log(args, "exp id : %s" % args.exp_id)
    add_log(args, "Path: %s is created" % args.current_save_path)
    add_log(args, "create log file at path: %s" % args.log_file)

    # prepare data
    args.id_to_word, args.vocab_size = prepare_data(args)
Example No. 19
def search_books():
    page = request.args.get('page')
    if page == None:
        page = 2
    page = int(page)

    db = client['meta']
    collection = db['newmetadata']
    collection1 = db['logs']
    if (request.args.get("title") != None):
        mytitle = request.args.get("title")
        v = collection.find_one(
            {"title": {
                '$regex': mytitle,
                '$options': 'i'
            }})
        if v != None:
            response_code = 201
            add_log(request.method, request.url, {
                "book_information": {
                    "title searched": request.args.get("title")
                }
            }, response_code, collection1)
            return dumps(v)
        else:
            response_code = 404
            add_log(request.method, request.url, {
                "book_information": {
                    "title searched": request.args.get("title")
                }
            }, response_code, collection1)
            return {'message': 'Book does not exist!', 'data': {}}, 404
    if (request.args.get("author") != None):
        myauthor = request.args.get("author")
        v = collection.find({"author": {
            '$regex': myauthor,
            '$options': 'i'
        }})  # add .skip(page).limit(8)
        if v != None:
            response_code = 201
            add_log(request.method, request.url, {
                "book_information": {
                    "author searched": request.args.get("title")
                }
            }, response_code, collection1)
            return dumps(v)
        else:
            response_code = 404
            add_log(request.method, request.url, {
                "book_information": {
                    "author searched": request.args.get("title")
                }
            }, response_code, collection1)
            return {'message': 'Book does not exist!', 'data': {}}, 404
    if (request.args.get("category") != None):
        v = collection.find({
            "categories": {
                "$elemMatch": {
                    "$elemMatch": {
                        "$in": [request.args.get("category")]
                    }
                }
            }
        }).skip(page).limit(8)
        if (v.count() == 0):
            response_code = 404
            add_log(
                request.method, request.url, {
                    "book_information": {
                        "category searched": request.args.get("category")
                    }
                }, response_code, collection1)
            return {'message': 'Book does not exist!', 'data': {}}, 404
        if (v != None):
            response_code = 201
            add_log(
                request.method, request.url, {
                    "book_information": {
                        "category searched": request.args.get("category")
                    }
                }, response_code, collection1)
            return dumps(v)
    else:
        return {'message': "Error"}, 404
Example No. 20
#!/usr/bin/env python3
import datetime
import setup
import config
import utils
import create_weekly_digest
import logs_repo as logfile

args = setup.args

logfile.sync_local_logfile()

if args.today:
    filter_today = utils.filter_by_date(config.today)
    utils.display_logs(filter_today, 'All logs from today: ')
elif args.thisweek:
    filter_this_week = utils.filter_by_date(config.beginning_of_week)
    utils.display_logs(filter_this_week, 'All logs from this week: ')
elif args.tags:
    filter_by_tag = utils.filter_by_tags(args.tags)
    utils.display_logs(filter_by_tag, f'All logs tagged with: {args.tags}')
elif args.all:
    utils.display_logs(utils.return_all, 'All logs:')
elif args.sync:
    logfile.sync_remote_logfile()
elif args.digest:
    create_weekly_digest.create()
else:
    utils.add_log()
    logfile.commit_local_logfile()
Example No. 21
def sedat_eval(args, ae_model, f, deb):
    """
    Input: 
        Original latent representation z : (n_batch, batch_size, seq_length, latent_size)
    Output: 
        An optimal modified latent representation z'
    """
    max_sequence_length = args.max_sequence_length
    id_bos = args.id_bos
    id_to_word = args.id_to_word
    limit_batches = args.limit_batches

    eval_data_loader = non_pair_data_loader(
        batch_size=args.batch_size,
        id_bos=args.id_bos,
        id_eos=args.id_eos,
        id_unk=args.id_unk,
        max_sequence_length=args.max_sequence_length,
        vocab_size=args.vocab_size)
    file_list = [args.test_data_file]
    eval_data_loader.create_batches(args,
                                    file_list,
                                    if_shuffle=False,
                                    n_samples=args.test_n_samples)
    if args.references_files:
        gold_ans = load_human_answer(args.references_files, args.text_column)
        assert len(gold_ans) == eval_data_loader.num_batch
    else:
        gold_ans = None

    add_log(args, "Start eval process.")
    ae_model.eval()
    f.eval()
    deb.eval()

    text_z_prime = {
        "source": [],
        "origin_labels": [],
        "before": [],
        "after": [],
        "change": [],
        "pred_label": []
    }
    if gold_ans is not None:
        text_z_prime["gold_ans"] = []
    z_prime = []
    n_batches = 0
    for it in tqdm.tqdm(list(range(eval_data_loader.num_batch)), desc="SEDAT"):

        _, tensor_labels, \
        tensor_src, tensor_src_mask, tensor_src_attn_mask, tensor_tgt, tensor_tgt_y, \
        tensor_tgt_mask, _ = eval_data_loader.next_batch()
        # only on negative example
        negative_examples = ~(tensor_labels.squeeze() == args.positive_label)
        tensor_labels = tensor_labels[negative_examples].squeeze(
            0)  # .view(1, -1)
        tensor_src = tensor_src[negative_examples].squeeze(0)
        tensor_src_mask = tensor_src_mask[negative_examples].squeeze(0)
        tensor_src_attn_mask = tensor_src_attn_mask[negative_examples].squeeze(
            0)
        tensor_tgt_y = tensor_tgt_y[negative_examples].squeeze(0)
        tensor_tgt = tensor_tgt[negative_examples].squeeze(0)
        tensor_tgt_mask = tensor_tgt_mask[negative_examples].squeeze(0)
        if negative_examples.any():
            if gold_ans is not None:
                text_z_prime["gold_ans"].append(gold_ans[it])

            text_z_prime["source"].append(
                [id2text_sentence(t, args.id_to_word) for t in tensor_tgt_y])
            text_z_prime["origin_labels"].append(tensor_labels.cpu().numpy())

            origin_data, _ = ae_model.forward(tensor_src, tensor_tgt,
                                              tensor_src_mask,
                                              tensor_src_attn_mask,
                                              tensor_tgt_mask)

            generator_id = ae_model.greedy_decode(origin_data,
                                                  max_len=max_sequence_length,
                                                  start_id=id_bos)
            generator_text = [
                id2text_sentence(gid, id_to_word) for gid in generator_id
            ]
            text_z_prime["before"].append(generator_text)

            data = deb(origin_data, mask=None)
            data = torch.sum(ae_model.sigmoid(data),
                             dim=1)  # (batch_size, d_model)
            #logit = ae_model.decode(data.unsqueeze(1), tensor_tgt, tensor_tgt_mask)  # (batch_size, max_tgt_seq, d_model)
            #output = ae_model.generator(logit)  # (batch_size, max_seq, vocab_size)
            y_hat = f.forward(data)
            y_hat = y_hat.round().int()
            z_prime.append(data)
            generator_id = ae_model.greedy_decode(data,
                                                  max_len=max_sequence_length,
                                                  start_id=id_bos)
            generator_text = [
                id2text_sentence(gid, id_to_word) for gid in generator_id
            ]
            text_z_prime["after"].append(generator_text)
            text_z_prime["change"].append([True] * len(y_hat))
            text_z_prime["pred_label"].append([y_.item() for y_ in y_hat])

            n_batches += 1
            if n_batches > limit_batches:
                break
    write_text_z_in_file(args, text_z_prime)
    add_log(args, "")
    add_log(args,
            "Saving model modify embedding %s ..." % args.current_save_path)
    torch.save(z_prime,
               os.path.join(args.current_save_path, 'z_prime_sedat.pkl'))
    return z_prime, text_z_prime
Example No. 22
    def add_log(self, *msg, level='info'):
        utils.add_log(self.logger, level, *msg)
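The wrapper above, like every snippet on this page, ultimately delegates to a project-specific utils.add_log whose source is not included here. Purely as a minimal sketch of what a compatible helper could look like, assuming it simply joins its message parts and forwards them to a standard-library logger (the name, signature, and behaviour below are assumptions, not the project's actual code):

import logging


def add_log(logger, level, *msgs):
    # Hypothetical helper: join the message parts with spaces and forward them
    # to the given logger, falling back to INFO for unknown level names.
    text = ' '.join(str(m) for m in msgs)
    log_fn = getattr(logger, level, logger.info)
    log_fn(text)


# Example call mirroring the snippets above:
add_log(logging.getLogger('cache'), 'info', 'get_cache:', 'page.html', 0, 0)

With a helper along these lines, a call such as self.add_log('cached permanently', url) in the next example would emit a single INFO line containing both parts.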
Example No. 23
    def download_page_request(self,
                              url,
                              retry=0,
                              referer='',
                              meth='GET',
                              data=''):
        if not url or self.stoped:
            utils.add_log(self.logger, 'error', 'URL is empty', url)
            return

        try:
            if meth != 'POST':
                res = self.cache.get_cache(url)
                if res:
                    return res

            if meth == 'GET':
                res = self.RequestSession.get(url, timeout=utils.DOWN_TIME_OUT)
            elif meth == 'POST' and data:
                res = self.RequestSession.post(url,
                                               timeout=utils.DOWN_TIME_OUT,
                                               data=data)
            else:
                return None
            sleep(utils.SPIDER_DOWNLOAD_SLEEP_TIME)
            if res.status_code == 200:
                if referer == '':
                    referer = res.url
                self.RequestSession.headers.update({
                    'referer': referer,
                    'Referer': referer
                })
                if meth != 'POST':
                    utils.add_log(
                        self.logger,
                        'info',
                        'save_cache expire:',
                        url,
                    )
                    if utils.IMG_CACHE_KEEP_INFINITE and utils.IsValidImage(
                            res.content):
                        self.cache.save_cache(url, res, expire_time=0, mtime=0)
                        self.add_log('cached permanently', url)
                    else:
                        self.cache.save_cache(url, res, time())
                return res
            else:
                if retry >= utils.RETRYMAX:
                    utils.add_log(self.logger, 'info', 'URL download failed, max retries reached', url)
                    return None
                else:
                    retry += 1

                    utils.add_log(self.logger, 'info',
                                  'URL download failed {}, retry {}'.format(url, retry))
                    sleep(5)
                    return self.download_page_request(url, retry, referer, meth, data)
        except (RequestException, OpenSSL.SSL.Error):
            if retry >= utils.RETRYMAX:
                utils.add_log(self.logger, 'info', 'URL download failed, max retries reached', url)
                return None
            else:
                retry += 1
                utils.add_log(self.logger, 'info',
                              'URL download failed {}, retry {}'.format(url, retry))
                sleep(5)
                return self.download_page_request(url, retry, referer, meth, data)
Example No. 24
def cron_sync_from_eqsl(dry_run=False):
    """ https://www.eqsl.cc/qslcard/DownloadInBox.txt """
    """
    todo: to avoid downloading too much (not really in fact) logs, get the oldest time with a eqsl_rcvd == 'N'
    and put it in query with RcvdSince = (YYYYMMDDHHMM)
    """

    if dry_run:
        print("-- [DRY RUN] Fetching logs from eQSL")
    else:
        print("-- Fetching logs from eQSL")

    _logbooks = Logbook.query.filter(
        Logbook.eqsl_qth_nickname.isnot(None)).all()
    for logbook in _logbooks:
        if not logbook.user.eqsl_name or not logbook.user.eqsl_password:
            continue  # Skip logbooks of user not using eQSL

        config = Config.query.first()
        if not config:
            print("!!! Error: config not found")
            add_log(category="CONFIG",
                    level="ERROR",
                    message="Config not found")
            return

        print("-- Working on logbook [{0}] {1}".format(logbook.id,
                                                       logbook.name))

        _payload = urllib.parse.urlencode({
            "UserName": logbook.user.eqsl_name,
            "Password": logbook.user.eqsl_password,
            "QTHNickname": logbook.eqsl_qth_nickname,
        })

        _url = "{0}?{1}".format(config.eqsl_download_url, _payload)

        _req = urllib.request.Request(_url)
        _text = None

        err_fetch = UserLogging()
        err_fetch.user_id = logbook.user.id
        err_fetch.logbook_id = logbook.id
        err_fetch.category = "EQSL FETCH"

        try:
            with urllib.request.urlopen(_req) as f:
                _text = f.read().decode("UTF-8")
        except urllib.error.URLError as e:
            err_fetch.level = "ERROR"
            err_fetch.message = "Error fetching from eQSL: {0}".format(e)
            db.session.add(err_fetch)
            db.session.commit()
            continue  # skip to next

        if not _text:
            err_fetch.level = "ERROR"
            err_fetch.message = "Error fetching from EQSL, _text undefined"
            db.session.add(err_fetch)
            db.session.commit()
            continue  # skip to next

        # Now get the download link
        # <li><a href="downloadedfiles/xxx.adi">.ADI file</a>
        m = re.search('<A HREF="(.*)">.ADI file</A>', _text)

        if m:
            _file_path = m.group(1)
            _url = "{0}/{1}".format(os.path.dirname(config.eqsl_download_url),
                                    _file_path)
            _req = urllib.request.Request(_url)
            _text = None

            try:
                print("-- Fetching ADIF {0}".format(_url))
                with urllib.request.urlopen(_req) as f:
                    # eQSL returns a file encoded in ISO8859-1 so decode it then re-encode it in UTF-8
                    _text = f.read().decode("ISO8859-1").encode("UTF-8")
            except urllib.error.URLError as e:
                err_fetch.level = "ERROR"
                err_fetch.message = "Error fetching from eQSL: {0}".format(e)
                db.session.add(err_fetch)
                db.session.commit()
                continue  # skip to next

            if not _text:
                err_fetch.level = "ERROR"
                err_fetch.message = "Error fetching from EQSL, _text for final URL undefined"
                db.session.add(err_fetch)
                db.session.commit()
                continue  # skip to next

            adif = parse_adif(_text)

            for log in adif:
                err_log = UserLogging()
                err_log.user_id = logbook.user.id
                err_log.logbook_id = logbook.id
                err_log.category = "EQSL LOG"

                _date = "{0} {1}".format(log["qso_date"], log["time_on"])
                _date_first = datetime.datetime.strptime(
                    _date + "00", "%Y%m%d %H%M%S")
                _date_second = datetime.datetime.strptime(
                    _date + "59", "%Y%m%d %H%M%S")
                # Try to find a matching log entry
                qso = Log.query.filter(
                    Log.logbook_id == logbook.id,
                    Log.user_id == logbook.user.id,
                    Log.call == log["call"].upper(),
                    Log.time_on.between(_date_first, _date_second),
                ).first()
                if qso:
                    if qso.eqsl_qsl_rcvd == "Y":
                        continue  # this eQSL have already been matched
                    print(
                        "-- Matching log found for {0} on {1} : ID {2}".format(
                            log["call"], _date, qso.id))
                    if not dry_run:
                        qso.eqsl_qsl_rcvd = "Y"
                        err_log.log_id = qso.id
                        err_log.level = "INFO"
                        err_log.message = "QSO from eQSL by {0} on {1} received and updated".format(
                            log["call"], _date)
                else:
                    print("-- No matching log found for {0} on {1}".format(
                        log["call"], _date))
                    err_log.level = "INFO"
                    err_log.message = "QSO from eQSL by {0} on {1} not found in database".format(
                        log["call"], _date)
                if not dry_run:
                    db.session.add(err_log)
                    db.session.commit()
        else:
            err_fetch.level = "ERROR"
            err_fetch.message = "Error fetching from EQSL, link not found in body"
            db.session.add(err_fetch)
            db.session.commit()
Example No. 25
def sync_contacts(list_id, type, meeting=None, owner=None):
    # get settings
    config = frappe.get_single("MailChimp Settings")

    if not config.host or not config.api_key:
        frappe.throw(
            _("No configuration found. Please make sure that there is a MailChimp configuration"
              ))
    if config.verify_ssl != 1:
        verify_ssl = False
    else:
        verify_ssl = True

    # get the ERP contact list
    if type.lower() == "alle":
        sql_query = """SELECT
                `tabPerson`.`name` AS `name`,
                `tabPerson`.`letter_salutation` AS `letter_salutation`,
                `tabPerson`.`salutation` AS `salutation`,
                `tabPerson`.`email` AS `email`,
                `tabPerson`.`do_not_contact` AS `do_not_contact`,
                `tabPerson`.`first_name` AS `first_name`,
                `tabPerson`.`last_name` AS `last_name`,
                `tabPerson`.`primary_organisation` AS `organisation`,
                'Person' AS `doctype`
            FROM
                `tabPerson`
            WHERE
                `tabPerson`.`email` LIKE '%@%.%'
            UNION SELECT
                `tabPublic Mail`.`name` AS `name`,
                `tabPublic Mail`.`letter_salutation` AS `letter_salutation`,
                '' AS `salutation`,
                `tabPublic Mail`.`email` AS `email`,
                `tabPublic Mail`.`do_not_contact` AS `do_not_contact`,
                '' AS `first_name`,
                '' AS `last_name`,
                `tabPublic Mail`.`organisation` AS `organisation`,
                'Public Mail' AS `doctype`
            FROM
                `tabPublic Mail`
            WHERE
                `tabPublic Mail`.`email` LIKE '%@%.%'"""
    else:
        sql_query = """SELECT
                `tabPerson`.`name` AS `name`,
                `tabPerson`.`letter_salutation` AS `letter_salutation`,
                `tabPerson`.`salutation` AS `salutation`,
                `tabPerson`.`email` AS `email`,
                `tabPerson`.`do_not_contact` AS `do_not_contact`,
                `tabPerson`.`first_name` AS `first_name`,
                `tabPerson`.`last_name` AS `last_name`,
                `tabPerson`.`primary_organisation` AS `organisation`,
                'Person' AS `doctype`
            FROM
                `tabPerson`
            WHERE
                `tabPerson`.`email` LIKE '%@%.%'
                AND `tabPerson`.`is_vip` = 0
                AND !( `tabPerson`.`name` IN (SELECT
                    `tabRegistration`.`person`
                    FROM `tabRegistration`
                    WHERE `tabRegistration`.`meeting` = '{meeting}'))
            UNION SELECT
                `tabPublic Mail`.`name` AS `name`,
                `tabPublic Mail`.`letter_salutation` AS `letter_salutation`,
                '' AS `salutation`,
                `tabPublic Mail`.`email` AS `email`,
                `tabPublic Mail`.`do_not_contact` AS `do_not_contact`,
                '' AS `first_name`,
                '' AS `last_name`,
                `tabPublic Mail`.`organisation` AS `organisation`,
                'Public Mail' AS `doctype`
            FROM
                `tabPublic Mail`
            WHERE
                `tabPublic Mail`.`email` LIKE '%@%.%'""".format(
            meeting=meeting)
    erp_members = frappe.db.sql(sql_query, as_dict=True)

    # get all members from the MailChimp list
    repeat = True
    offset = 0
    while repeat:
        mc_members = get_members(list_id, count=1000, offset=offset)['members']
        if len(mc_members) > 0:
            for mc_member in mc_members:
                # print(mc_member['email_address'])
                check_mc_member_in_erp(mc_member, erp_members)
            offset += 1000
        else:
            repeat = False

    # now push all ERP members to MailChimp
    contact_written = []
    for erp_member in erp_members:
        # compute mailchimp id (md5 hash of lower-case email)
        try:
            mc_id = hashlib.md5(erp_member['email'].lower().encode()).hexdigest()
        except:
            frappe.log_error(
                "Invalid email address (unable to compute hash): {0}".format(
                    erp_member['email']), "Invalid email address")
            continue
        # load subscription status from mailchimp if it is set as master
        # default is unsubscribed
        contact_status = "unsubscribed"
        if erp_member['do_not_contact'] == 1:
            contact_status = "unsubscribed"
        else:
            contact_status = "subscribed"

        url = "{0}/lists/{1}/members/{2}".format(config.host, list_id, mc_id)
        method = "PUT"

        contact_object = {
            "email_address": erp_member['email'],
            "status": contact_status,
            "merge_fields": {
                "FNAME": erp_member['first_name'] or "",
                "LNAME": erp_member['last_name'] or "",
                "TITEL": erp_member['salutation'] or "",
                "ANREDE": erp_member['letter_salutation'] or ""
            }
        }

        raw = execute(host=url,
                      api_token=config.api_key,
                      payload=contact_object,
                      verify_ssl=verify_ssl,
                      method=method)
        print("Updated to MailChimp {0}: {1}".format(erp_member['email'], raw))
        contact_written.append(erp_member['email'])

    url = "{0}/lists/{1}/members?fields=members.id,members.email_address,members.status".format(
        config.host, list_id)
    raw = execute(url, config.api_key, None, verify_ssl)
    results = json.loads(raw)

    if owner:
        frappe.publish_realtime(
            event='msgprint',
            message=_("Synchronisation to MailChimp complete"),
            user=owner)
    add_log(title=_("Sync complete"),
            message=(_("Sync to {0} completed, written {1} contacts.")).format(
                list_id, len(contact_written)),
            topic="MailChimp")
    return {'members': results['members']}
Example No. 26
def update_dxcc_from_cty_xml(_file=None, silent=False):
    if not silent:
        print(
            "--- Updating DXCC tables (prefixes, entities, exceptions) from cty.xml"
        )
    fname = os.path.join(current_app.config["TEMP_DOWNLOAD_FOLDER"], "cty.xml")

    config = Config.query.first()
    if not config:
        if not silent:
            print("!!! Error: config not found")
        add_log(category="CONFIG", level="ERROR", message="Config not found")
        raise Exception("config not found")

    if os.path.isfile(fname):
        os.remove(fname)
        if not silent:
            print("-- Removed old file {0}".format(fname))

    if not _file:
        if not silent:
            print("-- Downloading...")
        if not config.clublog_api_key:
            if not silent:
                print("!! Clublog API Key not defined")
            add_log(category="CRONS",
                    level="ERROR",
                    message="Clublog API Key not defined")
            raise Exception("no clublog_api_key in config")
        url = "https://secure.clublog.org/cty.php?api={0}".format(
            config.clublog_api_key)

        try:
            with urllib.request.urlopen(url) as response, open(
                    fname, "wb") as out_file:
                with gzip.GzipFile(fileobj=response) as uncompressed:
                    shutil.copyfileobj(uncompressed, out_file)
        except urllib.error.URLError as err:
            if not silent:
                print("!! Error: {0}".format(err))
            raise Exception(f"error: {err}")
        if not silent:
            print("-- File downloaded at {0}".format(fname))
    elif os.path.isfile(_file):
        fname = _file
        if not silent:
            print("-- File at {0}".format(fname))
    else:
        if not silent:
            print("-- what are you trying to do ?")
        raise Exception("unknown error")

    # Now parse XML file
    tree = None
    try:
        tree = ElementTree.parse(fname)
    except FileNotFoundError as err:
        if not silent:
            print("!! Error: {0}".format(err))
        raise Exception(f"file not found: {err}")
    except ElementTree.ParseError as err:
        if not silent:
            print("!! Error: {0}".format(err))
        raise Exception(f"XML Parsing error: {err}")

    if not tree:
        raise Exception("XML tree is none")

    root = tree.getroot()

    for element in root:
        if element.tag == "{http://www.clublog.org/cty/v1.0}entities":
            if not silent:
                print("++ Parsing {0}".format(element.tag))
            rmed = DxccEntities.query.delete()
            if not silent:
                print("-- Cleaned {0} old entries".format(rmed))
            parse_element(element, silent)

        elif element.tag == "{http://www.clublog.org/cty/v1.0}exceptions":
            if not silent:
                print("++ Parsing {0}".format(element.tag))
            rmed = DxccExceptions.query.delete()
            if not silent:
                print("-- Cleaned {0} old entries".format(rmed))
            parse_element(element, silent)

        elif element.tag == "{http://www.clublog.org/cty/v1.0}prefixes":
            if not silent:
                print("++ Parsing {0}".format(element.tag))
            rmed = DxccPrefixes.query.delete()
            if not silent:
                print("-- Cleaned {0} old entries".format(rmed))
            parse_element(element, silent)
Example No. 27
    def start_test(self, model, session, checkpoint_prefix):
        epoch_range = eval(FLAGS.epoch_range)
        epoch_range = range(epoch_range[0], epoch_range[1])
        save_x = None
        save_y = None
        best_auc = 0
        best_epoch = -1

        for epoch in epoch_range:
            if not os.path.exists('{}-{}.index'.format(checkpoint_prefix,
                                                       epoch)):
                continue

            add_log('start testing model-{}'.format(epoch))
            reload_model(session, model, checkpoint_prefix, epoch)

            test_result = []
            total_recall = 0

            for i, batch in enumerate(self.iter_batch(self.test_set, False)):
                feed_dict = self.create_feed_dict(model, batch, False)
                test_output = model.run_step(session, False, feed_dict)[0]

                for j in range(len(test_output)):
                    pred = test_output[j]
                    entity_pair = self.test_set.instance_entity[
                        j + batch.start_index]
                    for rel in range(1, len(pred)):
                        flag = int(((entity_pair[0], entity_pair[1], rel)
                                    in self.test_set.instance_triple))
                        total_recall += flag
                        test_result.append([(entity_pair[0], entity_pair[1],
                                             rel), flag, pred[rel]])

                if i % 100 == 0:
                    process_info('predicting {} / {}\r'.format(
                        i, batch.num_batches))

            add_log('\nevaluating...')

            sorted_test_result = sorted(test_result, key=lambda x: x[2])
            # Reference url: https://blog.csdn.net/zk_j1994/article/details/78478502
            pr_result_x = []  # recall
            pr_result_y = []  # precision
            correct = 0
            for i, item in enumerate(sorted_test_result[::-1]):
                if item[1] == 1:  # flag == 1
                    correct += 1
                pr_result_y.append(float(correct) / (i + 1))
                pr_result_x.append(float(correct) / total_recall)

            auc = sklearn.metrics.auc(x=pr_result_x, y=pr_result_y)
            prec_mean = (pr_result_y[100] + pr_result_y[200] +
                         pr_result_y[300]) / 3

            add_log(
                'auc: {:g}\np@100: {:g}\np@200: {:g}\np@300: {:g}\np@(100,200,300) mean: {:g}\n'
                .format(auc, pr_result_y[100], pr_result_y[200],
                        pr_result_y[300], prec_mean))

            if auc > best_auc:
                best_auc = auc
                best_epoch = epoch
                save_x = pr_result_x
                save_y = pr_result_y

        ensure_dir(os.path.join(FLAGS.models_dir, FLAGS.res_path))

        new_res_path = os.path.join(FLAGS.models_dir, FLAGS.res_path)
        np.save(os.path.join(new_res_path, 'model_x.npy'), save_x)
        np.save(os.path.join(new_res_path, 'model_y.npy'), save_y)
        add_log('best_auc: {:g}; best_epoch: {}'.format(best_auc, best_epoch))
Example No. 28
def main(args):
    model = utils.get_arch(arch=args.arch, dataset=args.dataset)

    if args.optim == 'sgd':
        optim = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=args.momentum)
    elif args.optim == 'adam':
        optim = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    criterion = torch.nn.CrossEntropyLoss()
    train_loader = utils.get_loader(args.dataset,
                                    args.batch_size,
                                    train=True,
                                    training=True)

    attacker = utils.PGDAttacker(
        radius=args.pgd_radius,
        steps=args.pgd_steps,
        step_size=args.pgd_step_size,
        random_start=args.pgd_random_start,
        norm_type=args.pgd_norm_type,
    )

    log = dict()

    if not args.cpu:
        model.cuda()
        criterion = criterion.cuda()

    if args.resume:
        # raise NotImplementedError
        state_dict = torch.load('{}-model.pkl'.format(args.resume_path))
        model.load_state_dict(state_dict['model_state_dict'])
        optim.load_state_dict(state_dict['optim_state_dict'])

        with open('{}-log.pkl'.format(args.resume_path), 'rb') as f:
            log = pickle.load(f)

    # x, y = next(train_loader)
    # if not args.cpu: x, y = x.cuda(), y.cuda()
    # adv_x = attacker.perturb(model, criterion, x, y)
    # max_grad_norm, grad_norm_list = get_grad_norm(model,criterion,adv_x,y)
    # utils.add_log(log, 'max_grad_norm', max_grad_norm)
    # utils.add_log(log, 'grad_norm_list', grad_norm_list)
    # logger.info('step [{}/{}]: max_grad_norm {:.3e}'
    #             .format(0, args.train_steps, max_grad_norm))
    # logger.info('')

    if not args.resume:
        ''' save the initial model parameter '''
        save_checkpoint('ckpt-{}'.format(0), '{}/ckpts/'.format(args.save_dir),
                        model, optim, log)

    start_step = args.resume_train_step if args.resume else 0
    for step in range(start_step, args.train_steps, 1):
        lr = args.lr * (args.lr_decay_rate**(step // args.lr_decay_freq))
        for group in optim.param_groups:
            group['lr'] = lr

        x, y = next(train_loader)
        if not args.cpu:
            x, y = x.cuda(), y.cuda()
        adv_x = attacker.perturb(model, criterion, x, y)

        if (step + 1) % args.calc_mg_freq == 0:
            max_grad_norm, grad_norm_list = get_grad_norm(
                model, criterion, adv_x, y)
            utils.add_log(log, 'max_grad_norm', max_grad_norm)
            utils.add_log(log, 'grad_norm_list', grad_norm_list)
            logger.info('step [{}/{}]: max_grad_norm {:.3e}'.format(
                step + 1, args.train_steps, max_grad_norm))
            logger.info('')

        with torch.no_grad():
            model.eval()
            _y = model(x)
            nat_loss = criterion(_y, y)
            nat_acc = (_y.argmax(dim=1) == y).sum().item() / len(y)
            utils.add_log(log, 'nat_loss', nat_loss.item())
            utils.add_log(log, 'nat_acc', nat_acc)

        # ''' ERM begin '''
        # model.train()
        # _y = model(x)
        # nat_loss = criterion(_y, y)
        # nat_acc = (_y.argmax(dim=1) == y).sum().item() / len(y)
        # utils.add_log(log, 'nat_loss', nat_loss.item())
        # utils.add_log(log, 'nat_acc', nat_acc)

        # model.zero_grad()
        # nat_loss.backward()

        # nat_grad_norm = 0
        # for pp in model.parameters():
        #     nat_grad_norm += (pp.grad.data**2).sum().item()
        # nat_grad_norm = np.sqrt(nat_grad_norm)
        # utils.add_log(log, 'nat_grad_norm', nat_grad_norm)
        # ''' ERM end '''
        ''' adv begin (includes gradient descent) '''
        model.train()
        _y = model(adv_x)
        adv_loss = criterion(_y, y)
        adv_acc = (_y.argmax(dim=1) == y).sum().item() / len(y)
        utils.add_log(log, 'adv_loss', adv_loss.item())
        utils.add_log(log, 'adv_acc', adv_acc)

        optim.zero_grad()
        adv_loss.backward()
        optim.step()

        adv_grad_norm = 0
        for pp in model.parameters():
            adv_grad_norm += (pp.grad.data**2).sum().item()
        adv_grad_norm = np.sqrt(adv_grad_norm)
        utils.add_log(log, 'adv_grad_norm', adv_grad_norm)
        ''' adv end '''

        # xjb_rate = batch_grad_norm / old_batch_grad_norm
        # logger.info('RI??? {:.3e}'.format(xjb_rate))

        if (step + 1) % args.report_freq == 0:
            logger.info('step [{}/{}]:'.format(step + 1, args.train_steps))
            logger.info('nat_acc {:.2%} \t nat_loss {:.3e}'.format(
                nat_acc, nat_loss.item()))
            logger.info('adv_acc {:.2%} \t adv_loss {:.3e}'.format(
                adv_acc, adv_loss.item()))
            # logger.info('nat_grad_norm {:.3e} \t adv_grad_norm {:.3e} \t rate {:.3e}'
            #             .format( nat_grad_norm, adv_grad_norm, adv_grad_norm/nat_grad_norm ))
            logger.info('')

        if (step+1) % args.save_freq == 0 \
            or (step+1) == args.train_steps:
            save_checkpoint('ckpt-{}'.format(step + 1),
                            '{}/ckpts/'.format(args.save_dir), model, optim,
                            log)
Example No. 29
def save_results(results, region, page):
    filename = os.path.join(RESULTS_DIR, region, "page_%d.txt" % page)
    results.to_csv(filename, sep="\t")
    utils.add_log(region, "Page %d done" % page)
Example No. 30
def train_step(args, data_loader, ae_model, dis_model, ae_optimizer,
               dis_optimizer, ae_criterion, dis_criterion, epoch):
    ae_model.train()
    dis_model.train()

    loss_ae, n_words_ae, xe_loss_ae, n_valid_ae = 0, 0, 0, 0
    loss_clf, total_clf, n_valid_clf = 0, 0, 0
    epoch_start_time = time.time()
    for it in range(data_loader.num_batch):
        flag_rec = True
        batch_sentences, tensor_labels, \
        tensor_src, tensor_src_mask, tensor_src_attn_mask, tensor_tgt, tensor_tgt_y, \
        tensor_tgt_mask, tensor_ntokens = data_loader.next_batch()

        # Forward pass
        latent, out = ae_model.forward(tensor_src, tensor_tgt, tensor_src_mask,
                                       tensor_src_attn_mask, tensor_tgt_mask)

        # Loss calculation
        if not args.sedat:
            loss_rec = ae_criterion(out.contiguous().view(-1, out.size(-1)),
                                    tensor_tgt_y.contiguous().view(-1)) / (
                                        tensor_ntokens.data + eps)

        else:
            # only on positive example
            positive_examples = tensor_labels.squeeze() == args.positive_label
            out = out[positive_examples]  # or out[positive_examples,:,:]
            tensor_tgt_y = tensor_tgt_y[
                positive_examples]  # or tensor_tgt_y[positive_examples,:]
            tensor_ntokens = (tensor_tgt_y != 0).data.sum().float()
            loss_rec = ae_criterion(out.contiguous().view(-1, out.size(-1)),
                                    tensor_tgt_y.contiguous().view(-1)) / (
                                        tensor_ntokens.data + eps)
            flag_rec = positive_examples.any()
            out = out.squeeze(0)
            tensor_tgt_y = tensor_tgt_y.squeeze(0)

        if flag_rec:
            n_v, n_w = get_n_v_w(tensor_tgt_y, out)
        else:
            n_w = float("nan")
            n_v = float("nan")

        x_e = loss_rec.item() * n_w
        loss_ae += loss_rec.item()
        n_words_ae += n_w
        xe_loss_ae += x_e
        n_valid_ae += n_v
        ae_acc = 100. * n_v / (n_w + eps)
        avg_ae_acc = 100. * n_valid_ae / (n_words_ae + eps)
        avg_ae_loss = loss_ae / (it + 1)
        ae_ppl = np.exp(x_e / (n_w + eps))
        avg_ae_ppl = np.exp(xe_loss_ae / (n_words_ae + eps))

        ae_optimizer.zero_grad()
        loss_rec.backward(retain_graph=not args.detach_classif)
        ae_optimizer.step()

        # Classifier
        if args.detach_classif:
            dis_lop = dis_model.forward(to_var(latent.clone()))
        else:
            dis_lop = dis_model.forward(latent)

        loss_dis = dis_criterion(dis_lop, tensor_labels)

        dis_optimizer.zero_grad()
        loss_dis.backward()
        dis_optimizer.step()

        t_c = tensor_labels.view(-1).size(0)
        n_v = (dis_lop.round().int() == tensor_labels).sum().item()
        loss_clf += loss_dis.item()
        total_clf += t_c
        n_valid_clf += n_v
        clf_acc = 100. * n_v / (t_c + eps)
        avg_clf_acc = 100. * n_valid_clf / (total_clf + eps)
        avg_clf_loss = loss_clf / (it + 1)
        if it % args.log_interval == 0:
            add_log(
                args, 'epoch {:3d} | {:5d}/{:5d} batches |'.format(
                    epoch, it, data_loader.num_batch))
            add_log(
                args,
                'Train : rec acc {:5.4f} | rec loss {:5.4f} | ppl {:5.4f} | dis acc {:5.4f} | dis loss {:5.4f} |'
                .format(ae_acc, loss_rec.item(), ae_ppl, clf_acc,
                        loss_dis.item()))
            add_log(
                args,
                'Train, avg : rec acc {:5.4f} | rec loss {:5.4f} | ppl {:5.4f} |  dis acc {:5.4f} | dis loss {:5.4f} |'
                .format(avg_ae_acc, avg_ae_loss, avg_ae_ppl, avg_clf_acc,
                        avg_clf_loss))
            if flag_rec:
                i = random.randint(0, len(tensor_tgt_y) - 1)
                reference = id2text_sentence(tensor_tgt_y[i], args.id_to_word)
                add_log(args, "input : %s" % reference)
                generator_text = ae_model.greedy_decode(
                    latent,
                    max_len=args.max_sequence_length,
                    start_id=args.id_bos)
                # batch_sentences
                hypothesis = id2text_sentence(generator_text[i],
                                              args.id_to_word)
                add_log(args, "gen : %s" % hypothesis)
                add_log(
                    args, "bleu : %s" %
                    calc_bleu(reference.split(" "), hypothesis.split(" ")))

    s = {}
    L = data_loader.num_batch + eps
    s["train_ae_loss"] = loss_ae / L
    s["train_ae_acc"] = 100. * n_valid_ae / (n_words_ae + eps)
    s["train_ae_ppl"] = np.exp(xe_loss_ae / (n_words_ae + eps))
    s["train_clf_loss"] = loss_clf / L
    s["train_clf_acc"] = 100. * n_valid_clf / (total_clf + eps)

    add_log(
        args, '| end of epoch {:3d} | time: {:5.2f}s |'.format(
            epoch, (time.time() - epoch_start_time)))

    add_log(
        args,
        '| rec acc {:5.4f} | rec loss {:5.4f} | rec ppl {:5.4f} | dis acc {:5.4f} | dis loss {:5.4f} |'
        .format(s["train_ae_acc"], s["train_ae_loss"], s["train_ae_ppl"],
                s["train_clf_acc"], s["train_clf_loss"]))
    return s