def task():
    while RUN:
        bug_beans = zen_tao_client.get_my_bug()

        # Filter the results and put the latest data into the cache
        new_beans = cache.filter_new("bug", bug_beans)
        cache.add("bug", bug_beans)

        if len(new_beans) != 0:
            tools.print_log(__name__ + "." + sys._getframe().f_code.co_name,
                            "Bugs shown this time: " + str(new_beans))

            for bean in new_beans:
                msg_title = "A bug has been assigned to you"
                msg_content = bean.title + "\n\t\n"
                msg_content += "Level: " + bean.level + "\n" \
                               + "Type: " + bean.error_type + "\n" \
                               + "From: " + bean.author
                tools.show_notify(msg_title, msg_content)

            sleep_time = len(new_beans) * 13
        else:
            sleep_time = 1
        time.sleep(sleep_time)
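A minimal sketch of how this polling loop is typically started and stopped (assuming the function above lives in a module named loop_task, as the exit_handler snippets further down this page suggest):

import threading

import loop_task

worker = threading.Thread(target=loop_task.task)
worker.start()
# ... later, for example from an exit/signal handler:
loop_task.RUN = False  # the while loop finishes its current iteration and exits
worker.join()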
def __request_data(url, parser):
    """
    Fetch the page at `url` and parse it into a list of beans.

    :param url: URL of the page to request
    :param parser: HTML parser that accumulates results in `bean_list`
    :return: bean list
    """
    html_doc = __request_html(url)
    parser.feed(html_doc)
    tools.print_log(__name__ + "." + sys._getframe().f_code.co_name,
                    url + "\n" + str(parser.bean_list))
    return parser.bean_list
def __request_html(url, post_data=None):
    """
    Return the HTML document on success, otherwise an empty string.
    :return: str
    """
    html_doc = ""
    try:
        request = urllib2.Request(url, post_data)
        response = urllib2.urlopen(request, timeout=10)
        html_doc = str(response.read())
    except Exception as e:
        tools.print_log(
            __name__ + "." + sys._getframe().f_code.co_name,
            "request error at:" + "\n" + url + "\nexception message:" +
            str(e))
    finally:
        return html_doc
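As an aside (not part of the original module), urllib2 exists only under Python 2; the equivalent request under Python 3 goes through urllib.request, roughly:

from urllib import request as urllib_request

req = urllib_request.Request(url, data=post_data)
response = urllib_request.urlopen(req, timeout=10)
html_doc = response.read().decode("utf-8", errors="replace")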
Example #6
def error(s):
    print_log(s, 'ERROR')
    log.error(s)
Example #7
def info(s):
    print_log(s)
    log.info(s)
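print_log itself is not included in these snippets; a hypothetical stand-in consistent with how info() and error() call it (a message plus an optional level) could look like this:

from datetime import datetime

def print_log(s, level='INFO'):
    # Illustrative helper only: echo the message to stdout with a timestamp and level tag
    print('[%s] %s %s' % (level, datetime.now().isoformat(), s))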
Example #8
filenm = 'import-units'
pid_file = '/var/run/%s.pid' % filenm
pid = demon.make_pid(pid_file)
log = demon.Log('/var/log/%s.log' % filenm)

eng_src = create_engine(db_url_src)
eng_dst = create_engine(db_url_dst)

eng_src.debug = True
#sql = 'select count(*) from dbo.tblUnit'

sql = 'SELECT count(*) count  FROM admin.users2'
q = eng_src.execute(sql)
count = q.fetchone().count
msg = 'Ada %d baris yang akan diselaraskan' % count  # "There are %d rows to be synchronized"
print_log(msg)
if count:
    log.info(msg)
    sources = get_syncs()
    row = 0
    awal = time()
    log_row = 0
    for source in sources.fetchall():
        row += 1
        log_row += 1
        try:
            insert()
        except Exception as e:
            error(str(e))
            sys.exit()
        durasi = time() - awal
Example #9
def exit_handler(signum, frame):
    tools.print_log(__name__ + "." + sys._getframe().f_code.co_name,
                    "exit sign"
                    + "\nsignum=" + str(signum)
                    + "\nframe=" + str(frame))
    loop_task.RUN = False
Example #10
def error(s):
    print_log(s, 'ERROR')
    log.error(s)    
Example #11
if not os.path.exists(args.output):
    os.makedirs(args.output)

save_dir = os.path.join(args.output, args.name)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
args.save_dir = save_dir
args.save = os.path.join(save_dir, args.name)

# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        tools.print_log(
            args.save,
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )
    else:
        torch.cuda.manual_seed(args.seed)

###############################################################################
# Load data
###############################################################################


def model_save(fn):
    if args.philly:
        fn = os.path.join(os.environ['PT_OUTPUT_DIR'], fn)
    with open(fn, 'wb') as f:
        torch.save([model, criterion, optimizer, epoch], f)
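A matching load helper is not shown at this point; a sketch that mirrors model_save above (the torch.load call that appears later on this page unpacks a similar checkpoint) might be:

def model_load(fn):
    # Sketch only: restore the objects written by model_save
    global model, criterion, optimizer, epoch
    if args.philly:
        fn = os.path.join(os.environ['PT_OUTPUT_DIR'], fn)
    with open(fn, 'rb') as f:
        model, criterion, optimizer, epoch = torch.load(f)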
def test(model, corpus, cuda, prt=False):
    model.eval()

    prec_list = []
    reca_list = []
    f1_list = []

    pred_tree_list = []
    targ_tree_list = []

    nsens = 0
    word2idx = corpus.dictionary.word2idx
    if args.wsj10:
        dataset = zip(corpus.train_sens, corpus.train_trees, corpus.train_nltktrees)
    else:
        dataset = zip(corpus.test_sens, corpus.test_trees, corpus.test_nltktrees)

    corpus_sys = {}
    corpus_ref = {}
    for sen, sen_tree, sen_nltktree in dataset:
        if args.wsj10 and len(sen) > 12:
            continue
        x = np.array([word2idx[w] if w in word2idx else word2idx['<unk>'] for w in sen])
        input = Variable(torch.LongTensor(x[:, None]))
        if cuda:
            input = input.cuda()

        hidden = model.init_hidden(1)
        _, hidden = model(input, hidden)

        distance = model.distance[0].squeeze().data.cpu().numpy()
        distance_in = model.distance[1].squeeze().data.cpu().numpy()

        nsens += 1
        if prt and nsens % 100 == 0:
            for i in range(len(sen)):
                tools.print_log(args.save, '%15s\t%s\t%s' % (sen[i], str(distance[:, i]), str(distance_in[:, i])))
            tools.print_log(args.save, 'Standard output:{}'.format(sen_tree))

        sen_cut = sen[1:-1]
        # gates = distance.mean(axis=0)
        for gates in [
            # distance[0],
            distance[1],
            # distance[2],
            # distance.mean(axis=0)
        ]:
            depth = gates[1:-1]
            parse_tree = build_tree(depth, sen_cut)

            corpus_sys[nsens] = MRG(parse_tree)
            corpus_ref[nsens] = MRG_labeled(sen_nltktree)

            pred_tree_list.append(parse_tree)
            targ_tree_list.append(sen_tree)

            model_out, _ = get_brackets(parse_tree)
            std_out, _ = get_brackets(sen_tree)
            overlap = model_out.intersection(std_out)

            prec = float(len(overlap)) / (len(model_out) + 1e-8)
            reca = float(len(overlap)) / (len(std_out) + 1e-8)
            if len(std_out) == 0:
                reca = 1.
                if len(model_out) == 0:
                    prec = 1.
            f1 = 2 * prec * reca / (prec + reca + 1e-8)
            prec_list.append(prec)
            reca_list.append(reca)
            f1_list.append(f1)

            if prt and nsens % 100 == 0:
                tools.print_log(args.save, 'Model output:{}'.format(parse_tree))
                tools.print_log(args.save, 'Prec: %f, Reca: %f, F1: %f' % (prec, reca, f1))

        if prt and nsens % 100 == 0:
            tools.print_log(args.save + '.log', '-' * 80)

            f, axarr = plt.subplots(3, sharex=True, figsize=(distance.shape[1] // 2, 6))
            axarr[0].bar(np.arange(distance.shape[1])-0.2, distance[0], width=0.4)
            axarr[0].bar(np.arange(distance_in.shape[1])+0.2, distance_in[0], width=0.4)
            axarr[0].set_ylim([0., 1.])
            axarr[0].set_ylabel('1st layer')
            axarr[1].bar(np.arange(distance.shape[1]) - 0.2, distance[1], width=0.4)
            axarr[1].bar(np.arange(distance_in.shape[1]) + 0.2, distance_in[1], width=0.4)
            axarr[1].set_ylim([0., 1.])
            axarr[1].set_ylabel('2nd layer')
            axarr[2].bar(np.arange(distance.shape[1]) - 0.2, distance[2], width=0.4)
            axarr[2].bar(np.arange(distance_in.shape[1]) + 0.2, distance_in[2], width=0.4)
            axarr[2].set_ylim([0., 1.])
            axarr[2].set_ylabel('3rd layer')
            plt.sca(axarr[2])
            plt.xlim(left=-0.5, right=distance.shape[1] - 0.5)
            plt.xticks(np.arange(distance.shape[1]), sen, fontsize=10, rotation=45)

            figure_dir = model_dir + '/figure/'
            if not os.path.exists(figure_dir):
                os.makedirs(figure_dir)
            plt.savefig(figure_dir+'%d.png' % (nsens))
            plt.close()

    prec_list, reca_list, f1_list \
        = np.array(prec_list).reshape((-1,1)), np.array(reca_list).reshape((-1,1)), np.array(f1_list).reshape((-1,1))
    if prt:
        tools.print_log(args.save, '-' * 80)
        np.set_printoptions(precision=1)
        tools.print_log(args.save,
                        'Mean Prec:{:.1f}, Mean Reca:{:.1f}, Mean F1:{:.1f}'.format(100.0 * prec_list.mean(axis=0)[0],
                                                                                    100.0 * reca_list.mean(axis=0)[0],
                                                                                    100.0 * f1_list.mean(axis=0)[0]))
        tools.print_log(args.save, 'Number of sentence: %i' % nsens)

        correct, total = corpus_stats_labeled(corpus_sys, corpus_ref)
        tools.print_log(args.save, correct)
        tools.print_log(args.save, total)
        tools.print_log(args.save, 'ADJP:{}, {}, {:.1f}'.format(correct['ADJP'], total['ADJP'],
                                                                100 * correct['ADJP'] / total['ADJP']))
        tools.print_log(args.save,
                        'NP:{}, {}, {:.1f}'.format(correct['NP'], total['NP'], 100 * correct['NP'] / total['NP']))
        tools.print_log(args.save,
                        'PP:{}, {}, {:.1f}'.format(correct['PP'], total['PP'], 100 * correct['PP'] / total['PP']))
        tools.print_log(args.save, 'INTJ:{}, {}, {:.1f}'.format(correct['INTJ'], total['INTJ'],
                                                                100 * correct['INTJ'] / total['INTJ']))
        tools.print_log(args.save, 'Averaged Depth {:.1f}'.format(corpus_average_depth(corpus_sys)))
        evalb(pred_tree_list, targ_tree_list)
        tools.print_log(args.save,
                        '{:.1f},' ',{:.1f},{:.1f},{:.1f},{:.1f},{:.1f}'.format(100 * prec_list.mean(axis=0)[0],
                                                                               corpus_average_depth(corpus_sys),
                                                                               100 * correct['ADJP'] / total['ADJP'],
                                                                               100 * correct['NP'] / total['NP'],
                                                                               100 * correct['PP'] / total['PP'],
                                                                               100 * correct['INTJ'] / total['INTJ']))
    return f1_list.mean(axis=0)
    # Load model
    with open(args.checkpoint, 'rb') as f:
        model, _, _ = torch.load(f)
        torch.cuda.manual_seed(args.seed)
        model.cpu()
        if args.cuda:
            model.cuda()
            torch.cuda.synchronize()

    # Load data
    import hashlib

    # fn = 'corpus.{}.data'.format(hashlib.md5('data/penn/'.encode()).hexdigest())
    fn = 'corpus.{}.data'.format(hashlib.md5((args.data_train + args.wvec).encode()).hexdigest())
    # fn = 'corpus.{}.data'.format(hashlib.md5((args.data + args.wvec).encode()).hexdigest())
    tools.print_log(args.save, 'Loading cached dataset...')
    corpus = torch.load(fn)
    dictionary = corpus.dictionary

    # test_batch_size = 1
    # test_data = batchify(corpus.test, test_batch_size, args)
    # test_loss = evaluate(test_data, test_batch_size)
    # print('=' * 89)
    # print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(
    #     test_loss, math.exp(test_loss), test_loss / math.log(2)))
    # print('=' * 89)

    tools.print_log(args.save, 'Loading PTB dataset...')
    if args.wvec:
        word2idx = tools.pkl_loader(os.path.join('data/wordvec', args.wvec, 'words2idx'))
        idx2word = tools.pkl_loader(os.path.join('data/wordvec', args.wvec, 'idx2words'))
Example #14
def train(env, agt, restore):
    n_trials = 9999999999
    quit = False
    max_index = 0
    for trial in xrange(max_index + 1, n_trials):
        # time.sleep(3)
        print "\tSimulator.run(): Trial {}".format(trial)  # [debug]
        if not agt.test:
            if trial > 80000 and trial < 150000:
                agt.epsilon = 0.3
            elif trial > 150000 and trial < 250000:
                agt.epsilon = 0.2
            elif trial > 250000:
                agt.epsilon = 20000 / float(
                    trial)  # changed to this when trial >= 2300000

        env.reset()

        while True:
            try:
                agt.update()
                env.step()
            except KeyboardInterrupt:
                quit = True
            finally:
                if env.done or quit:
                    agt.update_epsilon()
                    break
        if trial % args.TARGET_UPDATE_CYCLE == 0:
            agt.target_net.load_state_dict(agt.policy_net.state_dict())

        if trial % args.TEST_INTERVAL == 0:
            _replayMemoryImageCheck_forTest(agt, trial)

            total_runs = env.succ_times + env.hit_wall_times + env.hit_car_times + env.num_hit_time_limit \
                         + env.num_out_of_time
            succ_rate = env.succ_times / float(total_runs)
            hit_cars_rate = env.hit_car_times / float(total_runs)
            hit_wall_rate = env.hit_wall_times / float(total_runs)
            hit_hard_time_limit_rate = env.num_hit_time_limit / float(
                total_runs)
            out_of_time_rate = env.num_out_of_time / float(total_runs)

            print_log(
                '***********************************************************************',
                log)
            print_log('n_episode:{}'.format(trial), log)
            print_log(
                'successful trials / total runs: {}/{}'.format(
                    env.succ_times, total_runs), log)
            print_log(
                'number of trials that hit cars: {}'.format(env.hit_car_times),
                log)
            print_log(
                'number of trials that hit walls: {}'.format(
                    env.hit_wall_times), log)
            print_log(
                'number of trials that hit the hard time limit: {}'.format(
                    env.num_hit_time_limit), log)
            print_log(
                'number of trials that ran out of time: {}'.format(
                    env.num_out_of_time), log)
            print_log('successful rate: {}'.format(succ_rate), log)
            print_log('hit cars rate: {}'.format(hit_cars_rate), log)
            print_log('hit wall rate: {}'.format(hit_wall_rate), log)
            print_log(
                'hit hard time limit rate: {}'.format(
                    hit_hard_time_limit_rate), log)
            print_log('out of time rate: {}'.format(out_of_time_rate), log)
            print_log(
                '**********************************************************************',
                log)
            '''
            if agt.test:
                rates_file = os.path.join(data_path, 'rates' + '.cpickle')
                rates={}
                if os.path.exists(rates_file):
                    with open(rates_file, 'rb') as f:
                        rates = cPickle.load(f)
                        os.remove(rates_file)
                rates[trial] = {'succ_rate':succ_rate, 'hit_cars_rate':hit_cars_rate, 'hit_wall_rate':hit_wall_rate, \
                                 'hit_hard_time_limit_rate':hit_hard_time_limit_rate, 'out_of_time_rate':out_of_time_rate}

                with open(rates_file, 'wb') as f:
                    cPickle.dump(rates, f, protocol=cPickle.HIGHEST_PROTOCOL)
            '''

            env.clear_count()
        '''
Example #15
                    help='evaluation interval')

args = parser.parse_args()

filename = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log = open(os.path.join(args.SAVE_PATH, 'RLPark_Log_' + filename + '.txt'),
           'w')
args.log = log

Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))
state = namedtuple('state', ('state_img', 'state_tuple'))
state_tuple = namedtuple('state_tuple',
                         ('x', 'y', 'theta_heading', 's', 'theta_steering'))

print_log("pytorch version : {}".format(torch.__version__), log)
print_log("------[Initial parameters]------", log)
print_log("Initial epsilon: {}".format(args.epsilon), log)
print_log("Epsilon decay rate: {}".format(args.eps_decay), log)
print_log("Batch size: {}".format(args.BATCH_SIZE), log)
print_log("Learning rate: {}".format(args.lrate), log)
print_log("Discount factor(gamma): {}".format(args.discount), log)

#print "==========>", linear1.device
#print "==========>", self.linear1.weight.device


class LearningAgent(Agent):
    """An agent that learns to automatic parking"""
    def __init__(self, env, is_test=False):
        super(LearningAgent, self).__init__()
Example #16
File: calc_data.py  Project: aagusti/sp2d

if option.configure:
    init_db()
    sys.exit()

pid = demon.make_pid(pid_file)
log = demon.Log("/var/log/%s.log" % SYNC_TABLE)

session = create_session()

# sync = Sync.create()
row = session.query(func.count(Sync.id).label("c")).first()
count = row.c
msg = "Ada %d baris yang akan diselaraskan" % count
print_log(msg)
if not count:
    os.remove(pid_file)
    sys.exit()

log.info(msg)
sources = (
    session.query(PembayaranPg, Sync)
    .filter(
        Sync.kd_propinsi == PembayaranPg.kd_propinsi,
        Sync.kd_dati2 == PembayaranPg.kd_dati2,
        Sync.kd_kecamatan == PembayaranPg.kd_kecamatan,
        Sync.kd_kelurahan == PembayaranPg.kd_kelurahan,
        Sync.kd_blok == PembayaranPg.kd_blok,
        Sync.no_urut == PembayaranPg.no_urut,
        Sync.kd_jns_op == PembayaranPg.kd_jns_op,
Example #17
File: sap.py  Project: aagusti/sp2d
def error(s):
    print_log(s, "ERROR")
    log.error(s)
Example #18
def info(s):
    print_log(s)
    log.info(s)
Example #19
def train():
    # Turn on training mode which enables dropout.
    if args.model == 'QRNN': model.reset()
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        # seq_len = min(seq_len, args.bptt + 10)

        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        optimizer.zero_grad()

        output, hidden, rnn_hs, dropped_rnn_hs = model(data,
                                                       hidden,
                                                       return_h=True)
        # output, hidden = model(data, hidden, return_h=False)
        raw_loss = criterion(model.decoder.weight, model.decoder.bias, output,
                             targets)

        loss = raw_loss
        # Activation Regularization
        if args.alpha:
            loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean()
                              for dropped_rnn_h in dropped_rnn_hs[-1:])
        # Temporal Activation Regularization (slowness)
        if args.beta:
            loss = loss + sum(args.beta *
                              (rnn_h[1:] - rnn_h[:-1]).pow(2).mean()
                              for rnn_h in rnn_hs[-1:])
        loss.backward()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        if args.clip: torch.nn.utils.clip_grad_norm_(params, args.clip)
        optimizer.step()

        total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss.item() / args.log_interval
            elapsed = time.time() - start_time
            tools.print_log(
                args.save,
                '| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '
                'loss {:5.2f} | ppl {:8.2f} | bpc {:8.3f}'.format(
                    epoch, batch,
                    len(train_data) // args.bptt,
                    optimizer.param_groups[0]['lr'],
                    elapsed * 1000 / args.log_interval, cur_loss,
                    math.exp(cur_loss), cur_loss / math.log(2)))
            total_loss = 0
            start_time = time.time()
        ###
        batch += 1
        i += seq_len
def exit_handler(signum, frame):
    tools.print_log(
        __name__ + "." + sys._getframe().f_code.co_name,
        "exit sign" + "\nsignum=" + str(signum) + "\nframe=" + str(frame))
    loop_task.RUN = False
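Registering the handler is not shown in these snippets; typically it would be hooked up through the standard signal module, for example:

import signal

signal.signal(signal.SIGINT, exit_handler)   # Ctrl-C
signal.signal(signal.SIGTERM, exit_handler)  # e.g. kill / service stop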