Example #1
File: command.py Project: wigy/q
 def wr(self, *msg, **kwargs):
     """
     Print out run time message using command name as channel unless given as channel=<Name>.
     """
     from q import Q
     channel = kwargs.get('channel')
     if not channel:
         channel = '@' + self.__class__.__name__[7:]
     Q.wr(channel, *msg)
Example #2
 def create_ticket(self, ticket):
     from q import Q
     proxy = self.proxy()
     id = proxy.ticket.create(ticket['Title'], ticket['Notes'], {
         'status': '-',
         'substatus': '-',
         'milestone': self.settings.TRAC_INITIAL_MILESTONE})
     if not id:
         raise QError("Failed to create new ticket.")
     Q.wr('Ticketing', "New ticket %r created in the remote." % id)
     return str(id)
Example #3
 def wr(self, *msg, **kwargs):
     """
     Print out run time message.
     """
     from q import Q
     channel = kwargs.get('channel')
     if not channel:
         if self.code:
             channel = "#" + self.code
         else:
             channel = "#NoTicket"
     Q.wr(channel, *msg, **kwargs)
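Examples #1–#3 (and #5, #11, #26 below) only show call sites of Q.wr from the wigy/q project. As a rough, illustrative sketch of the interface those calls assume (channel name first, message parts after, with optional %-style arguments as in Example #5), something like the following would do; it is not the project's actual implementation:

class Q:

    @classmethod
    def wr(cls, channel, *msg, **kwargs):
        """Print a run-time message prefixed with its channel name."""
        # If the first part looks like a format string and more arguments
        # follow, apply %-formatting; otherwise just join the parts.
        if len(msg) > 1 and '%' in str(msg[0]):
            try:
                text = str(msg[0]) % tuple(msg[1:])
            except TypeError:
                text = ' '.join(str(m) for m in msg)
        else:
            text = ' '.join(str(m) for m in msg)
        print('[%s] %s' % (channel, text))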
Example #4
def print_level_order(tree):
    """Print tree values by depth."""
    q = Q()
    q2 = Q()
    q.enqueue(tree.root)
    container = ''
    while q or q2:
        if not q:
            q, q2 = q2, q
            container += '\n'
        node = q.dequeue()
        for child in node.children:
            q2.enqueue(child)
        container += str(node.val)
    return container
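print_level_order above and breadth_first_traversal in Example #13 treat Q as a plain FIFO queue: enqueue, dequeue, truthiness for "is it empty", and a front node exposing .val. A minimal sketch consistent with that usage follows; the _Node helper and attribute names are illustrative, and the projects' own Q classes may differ:

class _Node:
    """Internal wrapper so that q.front.val exposes the stored value."""
    def __init__(self, val):
        self.val = val
        self.behind = None

class Q:
    """Minimal FIFO queue: enqueue at the back, dequeue from the front."""
    def __init__(self):
        self.front = None
        self.back = None
        self._size = 0

    def enqueue(self, val):
        node = _Node(val)
        if self.back is None:
            self.front = self.back = node
        else:
            self.back.behind = node
            self.back = node
        self._size += 1

    def dequeue(self):
        if self.front is None:
            raise IndexError("dequeue from an empty queue")
        node = self.front
        self.front = node.behind
        if self.front is None:
            self.back = None
        self._size -= 1
        return node.val

    def __len__(self):
        return self._size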
Example #5
File: project.py Project: wigy/q
 def all_codes(self):
     """
     Collect a list of all ticket codes of this project.
     """
     from q import Q
     if not self.settings.WORKDIR:
         raise QError("Ticket storage directory WORKDIR is not set.")
     if not os.path.isdir(self.settings.WORKDIR):
         Q.wr("Initialize", "Creating ticket directory '%s'.",
              self.settings.WORKDIR)
         mkpath(self.settings.WORKDIR)
     ret = []
     for p in os.listdir(self.settings.WORKDIR):
         if os.path.isfile(self.settings.WORKDIR + "/" + p + "/README"):
             ret.append(p)
     return ret
Example #6
 def setUp(self):
     self.q = Q()
     self.q.throttle = {'settle': 1, 'pause': 2, 'maxjobs': 10}
     self.q.script = "test.sh"
     self.q.pattern = "txt"
     self.q.template = "templates/job.qsub"
     for i in range(3):
         self.q.enqueue("file" + str(i) + ".txt")
Example #7
    def testPositiveHeavyDomain(self):
        steps = list(range(-10, 21, 4))

        fs = [-1, -.75, -.5, -.25, .25, .5, .75, 1]

        expected = [-10, -6, -2, 2, 6, 10, 14, 18]
        actual = Q(steps, fs)
        self.assertEqual(expected, list(actual))
Example #8
    def testNegativeHeavyDomain(self):
        steps = list(range(-20, 11, 4))

        fs = [-1, -.75, -.5, -.25, .25, .5, .75, 1]

        expected = [-20, -16, -12, -8, -4, 0, 4, 8]
        actual = Q(steps, fs)
        self.assertEqual(expected, list(actual))
Example #9
    def testEvenStepCount(self):
        steps = range(-10, 21, 3)
        steps = list(steps)

        fs = [-1, -.8, -.6, -.4, -.2, 0, .2, .4, .6, .8, 1]

        expected = [-10, -7, -4, -1, 2, 5, 8, 11, 14, 17, 20]
        actual = Q(steps, fs)
        self.assertEqual(expected, list(actual))
Example #10
    def testFullRange(self):
        steps = range(-5, 6, 1)
        steps = list(steps)

        fs = [-1, -.8, -.6, -.4, -.2, 0, .2, .4, .6, .8, 1]

        expected = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]
        actual = Q(steps, fs)
        self.assertEqual(expected, list(actual))
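In Examples #7 through #10 the Q under test maps fractions in [-1, 1] onto a list of steps, and every expected list is consistent with a simple linear index mapping. A sketch of that mapping, under the assumption that this is all the behaviour being exercised (the helper name quantize is chosen here for illustration; the real class may do more):

def quantize(steps, fs):
    """Map each fraction in [-1, 1] to the step at the matching relative
    position of the steps list (Python 3: round() returns an int index)."""
    last = len(steps) - 1
    return [steps[round((f + 1) / 2 * last)] for f in fs]

For instance, quantize(list(range(-5, 6)), [-1, -.8, -.6, -.4, -.2, 0, .2, .4, .6, .8, 1]) reproduces the expected list of Example #10.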
Example #11
 def _set_ticket(self, code, comment, **attributes):
     """
     Fetch the ticket data for code.
     """
     from q import Q
     Q.wr('Ticketing', "Updating remote ticket %r:" % code)
     for k in attributes:
         parts = unicode(attributes[k]).split("\n")
         if len(parts) == 1:
             v = parts[0]
         else:
             v = "\\n ".join(parts)
         Q.wr('Ticketing', "  %s = %s" % (k, v))
     proxy = self.proxy()
     try:
         proxy.ticket.update(code, comment, attributes)
     except xmlrpclib.Fault:
         raise QError("Cannot find ticket '" + code + "'.")
Example #12
def main(argv):
    watch = False
    pattern = ""
    script = None
    f = lambda x: bool(re.search(pattern, x))
    q = Q()

    try:
        opts, args = getopt.getopt(argv, "kvm:P:p:s:")
    except getopt.GetoptError as err:
        print('Usage: jobber.py -s /your/script.sh [-kv] [-m maxjobs] [-P port] [-p pattern] [/path/to/data/directory]')
        print(err)
        sys.exit(0)

    for opt, arg in opts:
        if opt == '-P':
            q.port = arg
        if opt == '-k':
            q.keep_files = True
        if opt == '-v':
            q.verbose = True
        if opt == '-p':
            q.pattern = arg
        if opt == '-s':
            q.script = arg
        if opt == '-m':
            q.maxjobs = int(arg)

    if not q.script:
        print('No script specified (-s option is required). Exiting...')
        sys.exit(0)

    q.script = os.path.abspath(q.script)

    path = args[0] if len(args) > 0 else '.'

    filelist = []

    for root, dirs, files in os.walk(os.path.abspath(path)):
        for file in files:
            filelist.append(os.path.abspath(os.path.join(root, file)))

    for f in filelist:
        q.enqueue(f)

    q.start_server()
    q.process()

    while q.running():
        q.poll()

    print("Jobs complete.  Shutting down spool...")
    q.stop_server()

    return (0)
Example #13
 def breadth_first_traversal(self, operation):
     """Breadth first traversal."""
     q = Q()
     temp_root = self.root
     q.enqueue(temp_root)
     while temp_root:
         for children in temp_root.children:
             q.enqueue(children)
         operation(temp_root)
         q.dequeue()
         if q.front is None:
             break
         temp_root = q.front.val
Example #14
    def review_start(self, ticket, file):

        if not self._vsts_query:
            raise QError(
                "When using ReviewByVSTS, TicketingByVSTS must also be in use."
            )


# TODO: VSTS: Enable once this is working
#
#        repo_id = self._find_vsts_git_repository()
#        text = self.edit_review_comments(ticket)
#        params = {
#            'sourceRefName': 'refs/heads/' + ticket['Branch'],
#            'targetRefName': 'refs/heads/master',
#            'title': self.get_review_title(ticket),
#            'description': text,
#            'reviewers': []
#        }
#        print params
#        data = self._vsts_query('git/repositories/' + repo_id + '/pullrequests', post=params)
#        print data

        data = self._vsts_query('wit/workitems/' + ticket.code)
        if 'Microsoft.VSTS.Scheduling.RemainingWork' in data['fields']:
            patch = [{
                'op': 'replace',
                'path': '/fields/Microsoft.VSTS.Scheduling.RemainingWork',
                'value': 0.0
            }]
            self._vsts_query('wit/workitems/' + ticket.code, patch=patch)

        from q import Q
        Q.wr('Review', 'Finish review manually:')
        Q.wr(
            'Review',
            'https://phones.visualstudio.com/DefaultCollection/PhonesSW/SWCustomizationVT/_git/Z_Neo_Service/pullrequests'
        )
        return '1'
Example #15
def q_detail():
    if request.method == 'POST':
        get_data = json.loads(request.get_data(as_text=True))
        param = {'id_of_setq': get_data['idq']}
        q = Q()
        a = Ans()
        qdata = q.q_detail(param)
        adata = a.allans(param)
        if qdata == None:
            resData = {"resCode": 1, "data": [], "message": 'Query failed'}
            return jsonify(resData)
        resData = {"resCode": 0, "data": [qdata, adata], "message": 'Query succeeded'}
        return jsonify(resData)
    else:
        resData = {"resCode": 1, "data": [], "message": 'Wrong request method'}
        return jsonify(resData)
Example #16
def new_Q():
    if request.method == 'POST':
        get_data = json.loads(request.get_data(as_text=True))
        param = {
            'id_of_setq': get_data['setq_id'],
            'desc': get_data['desc'],
            'num': get_data['num']
        }
        q = Q()
        data = q.new_Q(param)
        if data == False:
            resData = {"resCode": 1, "data": [], "message": 'Failed to add'}
            return jsonify(resData)
        resData = {"resCode": 0, "data": data, "message": 'Question added'}
        return jsonify(resData)
    else:
        resData = {"resCode": 1, "data": [], "message": 'Wrong request method'}
        return jsonify(resData)
Example #17
def setq_detail():
    if request.method == 'POST':
        get_data = json.loads(request.get_data(as_text=True))
        param = {'id_setq': get_data['setq_id']}
        sq = SetQ()
        q = Q()
        sqdata = sq.setq_detail(param)
        qdata, ansdata = q.allq(param)
        if sqdata == None:
            resData = {'resCode': 1, 'data': [], 'message': 'Question set not found'}
            return jsonify(resData)
        resData = {
            'resCode': 0,
            'data': [sqdata, qdata, ansdata],
            'message': 'Query succeeded'
        }
        return jsonify(resData)
    else:
        resData = {"resCode": 1, "data": [], "message": 'Wrong request method'}
        return jsonify(resData)
Example #18
def perturb_two():
    if request.method == 'POST':
        get_data = json.loads(request.get_data(as_text=True))
        param = {
            'id_setq': get_data['setq_id'],
            # 'b' : get_data['b'],
            # Submit one set of answers to all questions (qid and ans) and perturb every answer according to the parameter
            'ans': get_data['ans_list']
        }
        re = []
        # param['ans'] = [[],[],[],[]]
        # Use qid to look up the number of answer options s for the question, then perturb with the given probability
        sq = SetQ()
        param1 = {'id_setq': get_data['setq_id']}
        b = 2 * float(sq.setq_pf(param1)['setQ_pf'])
        print('b = ', b)
        pro = np.random.uniform(0, b)
        print('probability of keeping the answer unchanged ', 1 - pro)
        for i in param['ans']:
            # i[0]  qid
            # i[1]  ans
            print('original answer ', i[1])
            q = Q()
            param = {'id': i[0]}
            data = q.q_detail(param)
            print('number of answer options ', data['q_num_of_ans'])
            s = data['q_num_of_ans']
            pro1 = np.random.uniform(0, 1)
            print(pro1)
            if pro1 <= 1 - pro:
                i[1] = i[1]
            else:
                option = []
                for num in range(s):
                    if num + 1 != i[1]:
                        option.append(num + 1)
                i[1] = random.sample(option, s - 1)[0]
            print('perturbation result', i[1])
            re.append(i)
        resData = {"resCode": 0, "data": [pro, re], "message": 'Returning perturbation result'}
        return jsonify(resData)
Example #19
def q(val, builder=QEvaluator(), **kwargs):
    # Note: the default QEvaluator() is created once, when the function is
    # defined, and shared by every call that does not pass its own builder.
    return Q(builder, val, kwargs)
Example #20
def project():
    if request.method == 'POST':
        get_data = json.loads(request.get_data(as_text=True))
        # 1. Prepare the data
        # Specifically, fetch all questions of this question set from the database and all user answers
        # q_list[]
        # ans1[]
        # ans2[]
        # ans1[i][j]: question i, user j
        # weight[i]: weight of user i
        user_list = []
        q_list = []
        ans1 = []
        ans2 = []
        # 1.1 Find all question ids
        param = {'id_setq': get_data['setq_id']}
        q = Q()
        data = q.allq_id(param)
        for i in data:
            q_list.append(i['idq'])
            ans1.append([])
            ans2.append([])
        print('q_list = ', q_list)
        # 1.2 Determine from the question ids which user ids answered them
        # for i in range(len(q_list)):
        param = {'id_q': q_list[0]}
        a = Ans()
        data = a.allans_userid(param)

        for item in data:
            if item['user_id'] not in user_list:
                user_list.append(item['user_id'])
        print('user_list = ', data)
        # 1.3 Collect all answers for each question id and user id into ans1 and ans2
        for i in range(len(q_list)):
            for j in user_list:
                param = {'id_q': q_list[i], 'id_user': j}
                a = Ans()
                data = a.allans_q_user(param)
                for item in data:
                    ans1[i].append(item['ans_one'])
                    ans2[i].append(item['ans_two'])
        print('ans1 = ', ans1)
        print('ans2 = ', ans2)

        # 2. Aggregate the results
        # MV-one
        # MV-two
        Data = []

        q_MV_one = MV(ans1)
        Data.append(q_MV_one)

        for i in range(len(q_list)):
            param = {'id_q': q_list[i], 'ans': q_MV_one[i]}
            q = Q()
            q.set_q_MV_one(param)

        q_MV_two = MV(ans2)
        Data.append(q_MV_two)

        for i in range(len(q_list)):
            param = {'id_q': q_list[i], 'ans': q_MV_two[i]}
            q = Q()
            q.set_q_MV_two(param)
        # TD-one
        # TD-two
        # TD also returns the corresponding weight list
        q_TD_one, weight_one = TD(user_list, q_list, ans1)
        Data.append(q_TD_one)

        for i in range(len(q_list)):
            param = {'id_q': q_list[i], 'ans': q_TD_one[i]}
            q = Q()
            q.set_q_TD_one(param)

        q_TD_two, weight_two = TD(user_list, q_list, ans2)
        Data.append(q_TD_two)

        for i in range(len(q_list)):
            param = {'id_q': q_list[i], 'ans': q_TD_two[i]}
            q = Q()
            q.set_q_TD_two(param)
        # Update the weights
        for i in range(len(user_list)):
            param = {
                'user_id': user_list[i],
                'setq_id': get_data['setq_id'],
                'weight1': weight_one[i],
                'weight2': weight_two[i]
            }
            w = Weight()
            w.new_weight(param)

        resData = {"resCode": 0, "data": Data, "message": 'Returning aggregation results'}
        return jsonify(resData)
Example #21
    tree = dendropy.Tree.get(path=sys.argv[2], schema='newick')

    size = len(codons)


    # frequency F0
    freq = np.ones(size) / size

    # model parameters
    w0 = tf.Variable(0.5, dtype=tf.float64)
    w2 = tf.Variable(4, dtype=tf.float64)
    p01sum = tf.Variable(0.5, dtype=tf.float64)
    p0prop = tf.Variable(0.867, dtype=tf.float64)
    k = tf.Variable(1.0, dtype=tf.float64)

    q0 = Q(w0, k, freq)
    q1 = Q(tf.constant(1.0, dtype=tf.float64), k, freq)
    q2 = Q(w2, k, freq)

    p0 = p0prop * p01sum
    p1 = p01sum - p0
    p2a = (1 - p0 - p1) * p0 / (p0 + p1)
    p2b = (1 - p0 - p1) * p1 / (p0 + p1)

    prop = [p0, p1, p2a, p2b]

    bg_scale = (p0 + p2a) * q0.scale + (p1 + p2b) * q1.scale
    fg_scale = p0 * q0.scale + p1 * q1.scale + (p2a + p2b) * q2.scale

    # save all the edge lengths
    edges = []
Example #22
if __name__ == '__main__':
    ali = {r.id: to_codon(r.seq) for r in SeqIO.parse(sys.argv[1], 'fasta')}
    tree = dendropy.Tree.get(path=sys.argv[2], schema='newick')

    size = len(codons)


    # frequency F0
    freq = np.ones(size) / size

    # omega
    w = tf.Variable(0.2, dtype=tf.float64)
    # kappa
    k = tf.Variable(2.0, dtype=tf.float64)

    q = Q(w, k, freq)


    # save all the edge lengths
    edges = []
    for node in tree.postorder_node_iter():
        if node.is_leaf():
            node.pl = tf.constant(np.transpose(np.array([n2v(c) for c in ali[node.taxon.label]])))
        else:
            for child in node.child_node_iter():
                el = tf.Variable(max(child.edge_length, 1e-9), dtype=tf.float64)
                edges.append(el)
                # if we don't want to optimize branches
                #el = tf.constant(max(child.edge_length, 1e-9), dtype=tf.float64)

                # compute p-matrix
Example #23
def run_tester(args, server):
    env = new_env(args)
    # env.configure()
    env.reset()
    env.max_history = args.eval_num
    if args.alg == 'A3C':
        agent = A3C(env, args)
    elif args.alg == 'Q':
        agent = Q(env, args)
    elif args.alg == 'VPN':
        agent = VPN(env, args)
    else:
        raise ValueError('Invalid algorithm: ' + args.alg)

    device = 'gpu' if args.gpu > 0 else 'cpu'
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.15)
    config = tf.ConfigProto(device_filters=[
        "/job:ps", "/job:worker/task:{}/{}:0".format(args.task, device)
    ],
                            gpu_options=gpu_options,
                            allow_soft_placement=True)
    variables_to_save = [v for v in tf.global_variables() if \
                not v.name.startswith("global") and not v.name.startswith("local/target/")]
    global_variables = [
        v for v in tf.global_variables() if not v.name.startswith("local")
    ]

    init_op = tf.variables_initializer(global_variables)
    init_all_op = tf.global_variables_initializer()

    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 tf.get_variable_scope().name)
    logger.info('Trainable vars:')
    for v in var_list:
        logger.info('  %s %s', v.name, v.get_shape())
    logger.info("Num parameters: %d", agent.local_network.num_param)

    def init_fn(ses):
        logger.info("Initializing all parameters.")
        ses.run(init_all_op)

    saver = FastSaver(variables_to_save, max_to_keep=0)
    sv = tf.train.Supervisor(
        is_chief=False,
        global_step=agent.global_step,
        summary_op=None,
        init_op=init_op,
        init_fn=init_fn,
        ready_op=tf.report_uninitialized_variables(global_variables),
        saver=saver,
        save_model_secs=0,
        save_summaries_secs=0)

    best_reward = -10000
    with sv.managed_session(server.target,
                            config=config) as sess, sess.as_default():
        epoch = args.eval_epoch
        while args.eval_freq * epoch <= args.max_step:
            path = os.path.join(args.log, "e%d" % epoch)
            if not os.path.exists(path + ".index"):
                time.sleep(10)
                continue
            logger.info("Start evaluation (Epoch %d)", epoch)
            saver.restore(sess, path)
            np.random.seed(args.seed)
            reward = evaluate(env,
                              agent.local_network,
                              args.eval_num,
                              eps=args.eps_eval)

            logfile = open(os.path.join(args.log, "eval.csv"), "a")
            print("Epoch: %d, Reward: %.2f" % (epoch, reward))
            logfile.write("%d, %.3f\n" % (epoch, reward))
            logfile.close()
            if reward > best_reward:
                best_reward = reward
                sv.saver.save(sess, os.path.join(args.log, 'best'))
                print("Saved to: %s" % os.path.join(args.log, 'best'))

            epoch += 1

    logger.info('tester stopped.')
Example #24
def rl_optimizer(UImatrix, num_of_actions, top_UI, params, batch_num, logging):

    #global policies
    #global top_UI
    policies = [([])] * params.num_states
    num_states = params.num_states

    # Defining UI environment
    actionmatrix = range(num_of_actions)
    goal = False  # Default goal
    ui_env = UI(num_states, actionmatrix, num_of_actions, goal,
                params.sensor_errors, params.confusion_error, params.penalties,
                params.grid, params.dimensions, params.init_position,
                params.goal_position)
    av_table = ActionValueTable(num_states, num_of_actions, ui_env)
    av_table.initialize(1)

    # Train agent for each goal
    klm_tot = 0
    klm_avg = 0
    policies = []
    best_actions = []  # [0]*ui_env.num_of_states*(ui_env.num_of_states-1)
    objective = -1
    p_learned = 1
    ii = 0

    #########
    # Define Q-learning agent
    learner = Q(0.5, 0.99)  #Q(0.6, 0.99) # 0.5, 0.99
    learner.explorer.epsilon = 0.7  # 0.7 # 0.9
    learner.explorer.decay = 0.999  # 0.99
    learner.explorer.env = ui_env
    agent = LearningAgent(av_table, learner)

    # Define task and experiment
    task = UITask(ui_env)
    experiment = EpisodicExperiment(task, agent, av_table)
    #######

    # Remove bad actions; pass the action matrix as input
    av_table.initialize(-5., actionmatrix)

    for j in range(8):  # Learning iterations

        initial_state = 0

        runs = 50  # Episodes in one iteration
        experiment.doEpisodes(runs)

        agent.learn()
        agent.reset()

        ##############################################
        # Save policy
        # For optimization
        p = list(av_table.params)  # Copies to new memory slot
        policies.append(p)

    ##############################################
    # Evaluation of UI and policy for current goal
    # Loop to get average : use only if errors used

    klm_tasks_tot = np.array([0.] * (params.num_states - 1))
    total_iterations = 1
    klm_tot = 0
    for i in range(total_iterations):
        # KLM value
        klm_g, best_path = evaluation(av_table, ui_env, task, False, params,
                                      batch_num, logging)
        if klm_g == -1:  # Not learned
            klm_tot += 20 * 5
            p_learned = 0
            print("error")
            break
        # Save to total KLM
        klm_tot += klm_g
    klm_avg += klm_tot / total_iterations

    return top_UI, objective, best_actions, best_path, klm_g
Example #25
def run(args, server):
    env = new_env(args)
    if args.alg == 'A3C':
        trainer = A3C(env, args)
    elif args.alg == 'Q':
        trainer = Q(env, args)
    elif args.alg == 'VPN':
        env_off = new_env(args)
        env_off.verbose = 0
        env_off.reset()
        trainer = VPN(env, args, env_off=env_off)
    else:
        raise ValueError('Invalid algorithm: ' + args.alg)

    # Variable names that start with "local" are not saved in checkpoints.
    variables_to_save = [v for v in tf.global_variables() if \
                not v.name.startswith("global") and not v.name.startswith("local/target/")]
    global_variables = [
        v for v in tf.global_variables() if not v.name.startswith("local")
    ]

    init_op = tf.variables_initializer(global_variables)
    init_all_op = tf.global_variables_initializer()
    saver = FastSaver(variables_to_save, max_to_keep=0)

    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 tf.get_variable_scope().name)
    logger.info('Trainable vars:')
    for v in var_list:
        logger.info('  %s %s', v.name, v.get_shape())
    logger.info("Num parameters: %d", trainer.local_network.num_param)

    def init_fn(ses):
        logger.info("Initializing all parameters.")
        ses.run(init_all_op)

    device = 'gpu' if args.gpu > 0 else 'cpu'
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.15)
    config = tf.ConfigProto(device_filters=[
        "/job:ps", "/job:worker/task:{}/{}:0".format(args.task, device)
    ],
                            gpu_options=gpu_options,
                            allow_soft_placement=True)
    logdir = os.path.join(args.log, 'train')
    summary_writer = tf.summary.FileWriter(logdir + "_%d" % args.task)

    logger.info("Events directory: %s_%s", logdir, args.task)
    sv = tf.train.Supervisor(
        is_chief=(args.task == 0),
        logdir=logdir,
        saver=saver,
        summary_op=None,
        init_op=init_op,
        init_fn=init_fn,
        summary_writer=summary_writer,
        ready_op=tf.report_uninitialized_variables(global_variables),
        global_step=trainer.global_step,
        save_model_secs=0,
        save_summaries_secs=30)

    logger.info(
        "Starting session. If this hangs, we're most likely waiting to connect to the parameter server. "
        +
        "One common cause is that the parameter server DNS name isn't resolving yet, or is misspecified."
    )
    with sv.managed_session(server.target,
                            config=config) as sess, sess.as_default():
        sess.run(trainer.sync)
        trainer.start(sess, summary_writer)
        global_step = sess.run(trainer.global_step)
        epoch = -1
        logger.info("Starting training at step=%d", global_step)
        while not sv.should_stop() and (not args.max_step
                                        or global_step < args.max_step):
            if args.task == 0 and int(global_step / args.eval_freq) > epoch:
                epoch = int(global_step / args.eval_freq)
                filename = os.path.join(args.log, 'e%d' % (epoch))
                sv.saver.save(sess, filename)
                sv.saver.save(sess, os.path.join(args.log, 'latest'))
                print("Saved to: %s" % filename)
            trainer.process(sess)
            global_step = sess.run(trainer.global_step)

        if args.task == 0 and int(global_step / args.eval_freq) > epoch:
            epoch = int(global_step / args.eval_freq)
            filename = os.path.join(args.log, 'e%d' % (epoch))
            sv.saver.save(sess, filename)
            sv.saver.save(sess, os.path.join(args.log, 'latest'))
            print("Saved to: %s" % filename)
    # Ask for all the services to stop.
    sv.stop()
    logger.info('reached %s steps. worker stopped.', global_step)
Example #26
 def wr(self, *msg):
     """
     Print out run time message.
     """
     from q import Q
     Q.wr('*' + self.__class__.__name__, *msg)
Example #27
 def __init__(self):
     self.eps = 1.0
     self.qlearner = Q()
Example #28
        sleep(espaceLettre)

    elif x.upper() == 'N':
        N()
        sleep(espaceLettre)

    elif x.upper() == 'O':
        O()
        sleep(espaceLettre)

    elif x.upper() == 'P':
        P()
        sleep(espaceLettre)

    elif x.upper() == 'Q':
        Q()
        sleep(espaceLettre)

    elif x.upper() == 'R':
        R()
        sleep(espaceLettre)

    elif x.upper() == 'S':
        S()
        sleep(espaceLettre)

    elif x.upper() == 'T':
        T()
        sleep(espaceLettre)

    elif x.upper() == 'U':