Example #1
from logger import MyLogger


class Room:
    """
    Base class for Room
    """
    def __init__(self, x, y, name, description, exits):
        """
        :param x: room x
        :param y: room y
        :param name: room name
        :param description: room description
        :param exits: available exit directions
        """
        self.x = x
        self.y = y
        self.name = name
        self.description = description
        self.exits = exits
        self.log = MyLogger('Room')

    def __str__(self):
        self.log.warning('__str__')
        return f'{self.name}\n{self.description}'

    def _check_exit(self, direction):
        self.log.info('in _check_exit')
        return direction in self.exits
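
Every example in this collection imports MyLogger without showing its definition. A minimal sketch of such a wrapper, assuming it is a thin layer over the standard logging module (the real projects' versions may differ), covering the three usage patterns seen in these examples (MyLogger('name').info(...), MyLogger().logger, and MyLogger('name').get_logger()):

import logging


class MyLogger:
    """Assumed thin wrapper around the standard logging module."""

    def __init__(self, name='root'):
        self.logger = logging.getLogger(name)
        if not self.logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter(
                '%(asctime)s %(name)s %(levelname)s: %(message)s'))
            self.logger.addHandler(handler)
        self.logger.setLevel(logging.INFO)

    def get_logger(self):
        return self.logger

    # delegate the methods the examples call directly
    def info(self, msg, *args):
        self.logger.info(msg, *args)

    def warning(self, msg, *args):
        self.logger.warning(msg, *args)

    def error(self, msg, *args):
        self.logger.error(msg, *args)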
Example #2

        try:
            self.socketio_namespace.on('connect', self.onConnect)
            self.socketio_namespace.on('disconnect', self.onDisconnect)
            self.socketio_namespace.on('reconnect', self.onReconnect)
            logger.info("socketio.on() success")
        except Exception as e:
            logger.error("socketio.on() except {0}".format(e))

        try:
            self.socketio.wait(seconds=1)
        except Exception as e:
            logger.error("wait() except {0}".format(e))

        logger.info("Connected.")

    def _async_emit(self, event, data={'data': 0}):
        try:
            self.connect()
            if self.socketio:
                self.socketio_namespace.emit(event, data)
                self.disconnect()
        except Exception as e:
            logger.error("_async_emit() except {0}".format(e))

    def emit(self, event, data={'data': 0}):
        logger.info("emit to server: {} - {}".format(event, str(data)))
        try:
            Thread(target=self._async_emit, args=(event,),
                   kwargs={'data': data}, name='thread_async_emit').start()
        except Exception as e:
            logger.error("emit() except {0}".format(e))
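
emit() returns immediately: each call spawns a short-lived thread that connects, emits, and disconnects. A hypothetical caller, where EmitterClient is an invented name for the class these methods belong to (the original snippet does not show the class header):

# EmitterClient is a placeholder name, not from the original code
client = EmitterClient()
client.emit('sensor_update', {'data': 42})  # returns at once; work happens in thread_async_emit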
Example #3
for t_iter in range(train_iter):
    data_X, data_Y = dataloader.GetTrainingBatch(t_iter, train_batch, 300)
    feed_dic = {sent_mm.input_x: data_X, sent_mm.input_y: data_Y}
    _, step, loss, acc = sess.run(
        [sent_train_op, sent_global_step, sent_mm.loss, sent_mm.accuracy],
        feed_dic)
    sum_loss += loss
    sum_acc += acc
    if t_iter % 100 == 99:
        sum_loss = sum_loss / 100
        sum_acc = sum_acc / 100
        ret_acc = sum_acc
        print(get_curtime() + " Step: " + str(step) + " Training loss: " +
              str(sum_loss) + " accuracy: " + str(sum_acc))
        logger.info(get_curtime() + " Step: " + str(step) +
                    " Training loss: " + str(sum_loss) + " accuracy: " +
                    str(sum_acc))
        # if sum_acc > 0.9:
        #     break
        sum_acc = 0.0
        sum_loss = 0.0
# for validation
sum_acc = 0.0
sum_loss = 0.0
for t_iter in range(10):
    data_X, data_Y = dataloader.GetTestData(t_iter, test_batch, 300)
    feed_dic = {sent_mm.input_x: data_X, sent_mm.input_y: data_Y}
    loss, acc = sess.run([sent_mm.loss, sent_mm.accuracy], feed_dic)
    sum_loss += loss
    sum_acc += acc
sum_loss = sum_loss / 10  # average over the 10 validation batches
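
The validation block ends before the averaged metrics are reported; a plausible continuation in the same style as the training-loop logging above:

sum_acc = sum_acc / 10
print(get_curtime() + " Validation loss: " + str(sum_loss) +
      " accuracy: " + str(sum_acc))
logger.info(get_curtime() + " Validation loss: " + str(sum_loss) +
            " accuracy: " + str(sum_acc))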
Example #4
from logger import MyLogger

logger = MyLogger().logger


def test(msg=None):
    # rospy passes the incoming Int32 message to this callback
    logger.info('test here')


if __name__ == '__main__':
    import rospy
    from std_msgs.msg import Int32, Empty

    rospy.init_node('build_classifier')
    rospy.Subscriber('/exercise/mode', Int32, test)

    print "Classifier launched. Listening to message..."
    logger.info('Classifier launched. Listening to message...')

    logger.info('main log')
    test()
    rospy.spin()
Example #5
            mm.x_len: x_len,
            mm.input_y: y,
            mm.init_states: init_states,
            mm.dropout_keep_prob: 1.0
        }
        _, step, loss, acc = sess.run(
            [df_train_op, df_global_step, mm.loss, mm.accuracy], feed_dic)
        counter += 1
        sum_acc += acc

    print(sum_acc / counter)


if __name__ == "__main__":
    print(get_curtime() + " Loading data ...")
    logger.info(get_curtime() + " Loading data ...")
    # load_data(FLAGS.data_file_path)
    load_data_fast()
    print(get_curtime() + " Data loaded.")
    logger.info(get_curtime() + " Data loaded.")
    gpu_config = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=True)
    # (self, input_dim, hidden_dim, max_seq_len, max_word_len, class_num, action_num):
    print(FLAGS.embedding_dim, FLAGS.hidden_dim, FLAGS.max_seq_len,
          FLAGS.max_sent_len, FLAGS.class_num, FLAGS.action_num)
    logger.info((FLAGS.embedding_dim, FLAGS.hidden_dim, FLAGS.max_seq_len,
                 FLAGS.max_sent_len, FLAGS.class_num, FLAGS.action_num))

    sess = tf.Session(config=gpu_config)
    with sess.as_default():
        with tf.device('/GPU:0'):
Example #6
        mdp = pickle.load(open("../data/mdp.pkl", "rb"))
        args = {"give_prompt": True, "mdp": mdp, "id": "new patient"}
        if progress is None:
            progress = myo_state2.Progress(classifier_pkl="../data/state_classifier.pkl", **args)
            is_running = True
        else:
            if is_running:
                progress.reset()

            progress = myo_state2.Progress(classifier_pkl="../data/state_classifier.pkl", **args)
            is_running = True


if __name__ == "__main__":
    import rospy
    from std_msgs.msg import Int32, Empty
    from geometry_msgs.msg import Quaternion

    logger.info("Classifier launched. Listening to message...")

    print "Running developer mode..."
    rospy.init_node("build_classifier")
    logger.info("after node...")
    rospy.Subscriber("/exercise/mode", Int32, signal_handler)

    MyoDemo2.pub_l = rospy.Publisher("/exercise/l/playback", Quaternion, queue_size=1)
    MyoDemo2.pub_u = rospy.Publisher("/exercise/u/playback", Quaternion, queue_size=1)

    print "Classifier launched. Listening to message..."

    rospy.spin()
Example #7
        action='store_true',
        dest='noappium',
        help='do not start appium and a new device in this program; '
             'the environment has been set up by default')
    parser.add_argument(
        '--noinit',
        action='store_true',
        dest='noinit',
        help='do not go through initialization procedure, mainly for debugging',
        default=False)

    args = parser.parse_args()

    if not args.noinit:
        running_logger.info("Initialisation starts")
        running_logger.info("apk folder: %s", args.apk_file)
        running_logger.info("appium port:%s", args.appium_port)
        running_logger.info("appium back port:%s", args.appium_back_port)
        running_logger.info("appium system port:%s", args.appium_back_port)
        running_logger.info("proxy port:%s", args.proxy_port)

        finished, result_folder = init(args)

        if not finished:
            running_logger.error("Initialisation Fail, result folder: %s",
                                 result_folder)
            exit()
    else:
        manifest = Manifest(args.apk_file)
        package_name = manifest.get_package_name()
Example #8
def main(args):
    logger = MyLogger(args.modelId)
    print(args)
    logger.info(args)

    np.random.seed(args.seed)
    th.manual_seed(args.seed)
    th.cuda.manual_seed(args.seed)

    best_epoch = -1
    best_dev_acc = 0

    cuda = args.gpu >= 0
    device = th.device('cuda:{}'.format(args.gpu)) if cuda else th.device('cpu')
    if cuda:
        th.cuda.set_device(args.gpu)

    trainset = SST()
    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              collate_fn=batcher(device),
                              shuffle=True,
                              num_workers=0)
    devset = SST(mode='dev')
    dev_loader = DataLoader(dataset=devset,
                            batch_size=100,
                            collate_fn=batcher(device),
                            shuffle=False,
                            num_workers=0)

    testset = SST(mode='test')
    test_loader = DataLoader(dataset=testset,
                             batch_size=100, collate_fn=batcher(device), shuffle=False, num_workers=0)

    if args.tree_lstm:
        model = TreeLSTM(trainset.num_vocabs,
                         args.x_size,
                         args.h_size,
                         trainset.num_classes,
                         args.dropout,
                         device,
                         cell_type='childsum' if args.child_sum else 'nary',
                         pretrained_emb=trainset.pretrained_emb).to(device)
    else:
        model = GraphTransformer(trainset.num_vocabs,
                                 args.x_size,
                                 trainset.num_classes,
                                 args.dropout,
                                 device,
                                 T_step=args.T_step,
                                 pretrained_emb=trainset.pretrained_emb).to(device)
    print(model)
    logger.info(model)

    params_ex_emb = [x for x in model.parameters()
                     if x.requires_grad and x.size(0) != trainset.num_vocabs]
    params_emb = list(model.embedding.parameters())

    for p in params_ex_emb:
        if p.dim() > 1:
            INIT.xavier_uniform_(p)

    if args.Adagrad:
        optimizer = optim.Adagrad([
            {'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.weight_decay},
            {'params': params_emb, 'lr': 0.1 * args.lr}])
    elif args.Adadelta:
        optimizer = optim.Adadelta([
            {'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.weight_decay},
            {'params': params_emb, 'lr': 0.1 * args.lr}])
    else:
        optimizer = optim.Adam([
            {'params': params_ex_emb, 'lr': args.lr, 'weight_decay': args.weight_decay},
            {'params': params_emb, 'lr': 0.1 * args.lr}])

    dur = []
    for epoch in range(args.epochs):
        t_epoch = time.time()
        model.train()
        for step, batch in enumerate(train_loader):
            if step >= 3:
                t0 = time.time() # tik

            logits = model(batch)
            logp = F.log_softmax(logits, 1)
            loss = F.nll_loss(logp, batch.label, reduction='sum')

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step >= 3:
                dur.append(time.time() - t0) # tok

            if step > 0 and step % args.log_every == 0:
                pred = th.argmax(logits, 1)
                acc = th.sum(th.eq(batch.label, pred))
                root_ids = [i for i in range(batch.graph.number_of_nodes()) if batch.graph.out_degree(i)==0]
                root_acc = np.sum(batch.label.cpu().data.numpy()[root_ids] == pred.cpu().data.numpy()[root_ids])

                print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | Acc {:.4f} | Root Acc {:.4f} | Time(s) {:.4f}".format(
                    epoch, step, loss.item(), 1.0*acc.item()/len(batch.label), 1.0*root_acc/len(root_ids), np.mean(dur)))
                logger.info("Epoch {:05d} | Step {:05d} | Loss {:.4f} | Acc {:.4f} | Root Acc {:.4f} | Time(s) {:.4f}".format(
                    epoch, step, loss.item(), 1.0 * acc.item() / len(batch.label), 1.0 * root_acc / len(root_ids),
                    np.mean(dur)))
        print('Epoch {:05d} training time {:.4f}s'.format(epoch, time.time() - t_epoch))
        logger.info('Epoch {:05d} training time {:.4f}s'.format(epoch, time.time() - t_epoch))

        # eval on dev set
        accs = []
        root_accs = []
        model.eval()
        for step, batch in enumerate(dev_loader):
            with th.no_grad():
                logits = model(batch)
            pred = th.argmax(logits, 1)
            acc = th.sum(th.eq(batch.label, pred)).item()
            accs.append([acc, len(batch.label)])
            root_ids = [i for i in range(batch.graph.number_of_nodes()) if batch.graph.out_degree(i)==0]
            root_acc = np.sum(batch.label.cpu().data.numpy()[root_ids] == pred.cpu().data.numpy()[root_ids])
            root_accs.append([root_acc, len(root_ids)])

        dev_acc = 1.0*np.sum([x[0] for x in accs])/np.sum([x[1] for x in accs])
        dev_root_acc = 1.0*np.sum([x[0] for x in root_accs])/np.sum([x[1] for x in root_accs])
        print("Epoch {:05d} | Dev Acc {:.4f} | Root Acc {:.4f}".format(
            epoch, dev_acc, dev_root_acc))
        logger.info("Epoch {:05d} | Dev Acc {:.4f} | Root Acc {:.4f}".format(
            epoch, dev_acc, dev_root_acc))

        if dev_root_acc > best_dev_acc:
            best_dev_acc = dev_root_acc
            best_epoch = epoch
            th.save(model.state_dict(), './res/best_{}.pkl'.format(args.modelId))
        else:
            if best_epoch <= epoch - 10:
                break

        # lr decay
        for param_group in optimizer.param_groups:
            param_group['lr'] = max(1e-5, param_group['lr'] * 0.99)
            print(param_group['lr'])
            logger.info(param_group['lr'])

    # test
    model.load_state_dict(th.load('./res/best_{}.pkl'.format(args.modelId)))
    accs = []
    root_accs = []
    model.eval()
    for step, batch in enumerate(test_loader):
        g = batch.graph
        n = g.number_of_nodes()
        with th.no_grad():
            logits = model(batch)

        pred = th.argmax(logits, 1)
        acc = th.sum(th.eq(batch.label, pred)).item()
        accs.append([acc, len(batch.label)])
        root_ids = [i for i in range(batch.graph.number_of_nodes()) if batch.graph.out_degree(i)==0]
        root_acc = np.sum(batch.label.cpu().data.numpy()[root_ids] == pred.cpu().data.numpy()[root_ids])
        root_accs.append([root_acc, len(root_ids)])

    test_acc = 1.0*np.sum([x[0] for x in accs])/np.sum([x[1] for x in accs])
    test_root_acc = 1.0*np.sum([x[0] for x in root_accs])/np.sum([x[1] for x in root_accs])
    print('------------------------------------------------------------------------------------')
    print("Epoch {:05d} | Test Acc {:.4f} | Root Acc {:.4f}".format(
        best_epoch, test_acc, test_root_acc))
    logger.info('------------------------------------------------------------------------------------')
    logger.info("Epoch {:05d} | Test Acc {:.4f} | Root Acc {:.4f}".format(
        best_epoch, test_acc, test_root_acc))
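
main() reads many attributes off args, but the parser itself is not shown. A sketch of an argument parser that would satisfy every args.* reference above; the attribute names come from the code, while the flag spellings and defaults are assumptions:

import argparse


def parse_args():
    p = argparse.ArgumentParser()
    p.add_argument('--modelId', default='sst')
    p.add_argument('--seed', type=int, default=41)
    p.add_argument('--gpu', type=int, default=0)
    p.add_argument('--batch-size', type=int, default=25)
    p.add_argument('--x-size', type=int, default=300)
    p.add_argument('--h-size', type=int, default=150)
    p.add_argument('--dropout', type=float, default=0.5)
    p.add_argument('--lr', type=float, default=0.05)
    p.add_argument('--weight-decay', type=float, default=1e-4)
    p.add_argument('--epochs', type=int, default=100)
    p.add_argument('--log-every', type=int, default=5)
    p.add_argument('--T-step', type=int, default=4)
    p.add_argument('--tree-lstm', action='store_true')
    p.add_argument('--child-sum', action='store_true')
    p.add_argument('--Adagrad', action='store_true')
    p.add_argument('--Adadelta', action='store_true')
    return p.parse_args()  # dashes become underscores: args.batch_size, args.T_step, ...


if __name__ == '__main__':
    main(parse_args())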
Example #9
        if res_data.shape[0] > 0:
            helpers.bulk(es_client,
                         doc_generator(res_data, item, "Date", INDEX_1_DAY))
    LOGGER.info(f"Done downloading data for - [{today_date}]")


def min_downloader_main():
    start_date = datetime.today().strftime("%Y-%m-%d")
    end_date = (datetime.today() + timedelta(days=1)).strftime("%Y-%m-%d")
    es_client = get_es_client()

    LOGGER.info(f"Downloading day's worth of data for date - [{start_date}]")
    for item in STOCKS_LIST:
        res_data = download_min_data(item, start_date, end_date)
        LOGGER.info(res_data)
        if res_data.shape[0] > 0:
            helpers.bulk(
                es_client,
                doc_generator(res_data, item, "Datetime", INDEX_1_MIN))
    LOGGER.info(f"Done downloading data for - [{start_date}]")


if __name__ == "__main__":
    LOGGER.info("Inside main function")
    LOGGER.info(
        f"Global variables are:: {STOCKS_LIST}, {ES_HOST}, {ES_PORT}, {ES_USERNAME}, {ES_PASSWORD}"
    )

    min_downloader_main()
    day_downloader_main()
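
get_es_client() is not shown in this snippet; a minimal sketch using the official elasticsearch Python client and the globals logged above (an assumption about the helper, not its actual code):

from elasticsearch import Elasticsearch


def get_es_client():
    # ES_HOST, ES_PORT, ES_USERNAME and ES_PASSWORD are the module-level
    # globals referenced in the __main__ block above
    return Elasticsearch([{'host': ES_HOST, 'port': ES_PORT}],
                         http_auth=(ES_USERNAME, ES_PASSWORD))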
Example #10
class FundMonitor(object):
    """
    Fund return monitor
    """
    def __init__(self):
        # request headers
        self.headers = {
            'content-type':
            'application/json',
            'User-Agent':
            'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
        }
        self.pattern = r'^jsonpgz\((.*)\)'
        self.total_fund = None
        self.last_update_time = None
        self.global_config = parse_json(default_config_path)
        self.total_fund_file = self.global_config["total_fund_path"]
        self.target_fund = self.global_config["target_fund"]
        self.logger = MyLogger("monitor.py - Fund Monitor").get_logger()

    def str2list(self, context: str) -> List:
        """
        First step in parsing the raw response: split it into per-fund
        record strings.
        :param context: raw text containing bracketed fund records
        :return: list of record strings (bracket contents)
        """
        result = []
        start, end = 0, 0
        while start < len(context) and end <= len(context):
            if context[start] == '[':
                if context[end] == ']':
                    result.append(context[start + 1:end])
                    start = end + 1
            else:
                start += 1
            end += 1
        return result

    def preprocess(self, context: str) -> dict:
        """
        Main entry point for preprocessing the response; the result is
        later saved as JSON.
        :param context:
        :return: dict mapping fund_num -> [fund_name, fund_type]
        """
        temp = self.str2list(context)
        result = dict()
        for idx in temp:
            data = idx.split(',')
            assert len(data) == 5
            fund_num = str(data[0].strip('\"'))
            fund_name = data[2].strip('\"')
            fund_type = data[3].strip('\"')
            if fund_num not in result:
                result.setdefault(fund_num, [fund_name, fund_type])
        return result

    def get_fund_type_list(self):
        self.logger.info("正在更新所有基金列表")
        try:
            url = 'http://fund.eastmoney.com/js/fundcode_search.js'
            res = requests.get(url, headers=self.headers)
            context = re.findall(r'\[.*\]', res.text)
            context = context[0][1:-1]
            res = self.preprocess(context)  # preprocess the response
            # save to file
            with open(self.total_fund_file, 'w', encoding='utf-8') as file:
                json.dump(res, file, ensure_ascii=False)
        except Exception:
            self.logger.warning("Failed to fetch the full fund list")
        else:
            self.logger.info("Fund list fetched and saved")

    def get_info(self, fund_num: str) -> str:
        """
        Main entry point for fetching a single fund's info
        :param fund_num: fund code
        :return: formatted fund info string
        """
        url = "http://fundgz.1234567.com.cn/js/%s.js" % fund_num
        try:
            res = requests.get(url, headers=self.headers)
            context = res.text
            re_result = re.findall(self.pattern, context)
            for idx in re_result:
                data = json.loads(idx)
                fund_num = data["fundcode"]
                fund_type = self.get_type(fund_num)
                formater = "基金:{} | {} | 收益率: {} %".format(
                    data['name'], fund_type, data['gszzl'])
                return formater
        except Exception:
            self.logger.warning("Fund code: {} lookup failed".format(fund_num))

    def read_total_fund(self):
        """
        Read the full fund list at initialization time
        :return:
        """
        try:
            if not os.path.exists(self.total_fund_file):
                # fetch and save the list first if the local file is missing
                self.get_fund_type_list()
            if not os.path.exists(self.total_fund_file):
                raise OSError("full fund list file does not exist")
            self.total_fund = parse_json(self.total_fund_file)
        except OSError as e:
            self.logger.warning("Failed to read full fund list, file missing: {}".format(e))

    def get_type(self, fund_num: str) -> List:
        """
        Get the fund's name and type
        :param fund_num: fund code
        :return:
        """
        if self.total_fund is None:
            self.read_total_fund()
        if fund_num in self.total_fund:
            return self.total_fund.get(fund_num)[1]
        else:
            return []

    def get_target_fund_info(self, target_fund=None) -> List[str]:
        result = []
        if target_fund is None:
            self.logger.info("以global_config文件中的target_fund为目标进行全量查找")
            target_fund = self.target_fund
        for target in target_fund:
            target_result = self.get_info(target)
            result.append(target_result)
        return result
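
A short usage sketch for FundMonitor, assuming a valid global_config with a target_fund list (the output format follows get_info above):

if __name__ == '__main__':
    monitor = FundMonitor()
    monitor.read_total_fund()  # load the full fund list, fetching it if missing
    for info in monitor.get_target_fund_info():
        print(info)  # e.g. "Fund: XXX | hybrid | estimated return: 1.23 %"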
Example #11
#                                         trainfile = 'training.1600000.processed.noemoticon.csv',
#                                         testfile = 'testdata.manual.2009.06.14.csv',
#                                         charVocab = char_vocab
#                         )
# # sentiReader.load_data()
# sentiReader.load_data_fast(
#                         '/home/hadoop/ERD/data/senti_train_data.pickle',
#                         '/home/hadoop/ERD/data/senti_train_label.pickle',
#                         '/home/hadoop/ERD/data/senti_test_data.pickle',
#                         '/home/hadoop/ERD/data/senti_test_label.pickle'
#                           )

# (self, input_dim, hidden_dim, max_seq_len, max_word_num, class_num, action_num):
print(FLAGS.embedding_dim, FLAGS.hidden_dim, FLAGS.max_seq_len,
      FLAGS.max_sent_len, FLAGS.class_num, FLAGS.action_num)
logger.info((FLAGS.embedding_dim, FLAGS.hidden_dim, FLAGS.max_seq_len,
             FLAGS.max_sent_len, FLAGS.class_num, FLAGS.action_num))

print(get_curtime() + " Data loaded.")
logger.info(get_curtime() + " Data loaded.")


# # save the Twitter data
# data = get_data()
# with open('data/data_dict.txt', 'wb') as handle:
#     pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)

# save the PTB data
# with open('data/char_tensors.txt', 'wb') as handle:
#     pickle.dump(char_tensors, handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #13
    parser.add_argument(
        '--noappium',
        action='store_true',
        dest='noappium',
        help='do not start appium and a new device in this program; '
             'the environment has been set up by default')
    args = parser.parse_args()

    # check the working dir; some paths below are hard-coded
    if os.path.basename(os.getcwd()) != 'tool_testing':
        running_logger.error('Working dir must be tool_testing')
        running_logger.error('exit!')
        exit(-1)

    # make sure apk exists
    if not os.path.isfile(args.apk_file):
        running_logger.error("Cannot find the apk file")
        running_logger.error("exit!")
        exit(-1)

    # logging
    running_logger.info('Initialisation starts')
    running_logger.info('apk:\t\t\t%s', args.apk_file)
    running_logger.info('appium port:\t\t%s', args.appium_port)
    running_logger.info('appium back port:\t%s', args.appium_back_port)
    running_logger.info('proxy port:\t\t%s', args.proxy_port)
    running_logger.info('system port:\t\t%s', args.appium_system_port)

    # start main func
    main(args)
Example #14
                classifier_pkl='../data/state_classifier.pkl', **args)
            is_running = True
        else:
            if is_running:
                progress.reset()

            progress = myo_state2.Progress(
                classifier_pkl='../data/state_classifier.pkl', **args)
            is_running = True


if __name__ == '__main__':
    import rospy
    from std_msgs.msg import Int32, Empty
    from geometry_msgs.msg import Quaternion

    logger.info('Classifier launched. Listening to message...')

    print "Running developer mode..."
    rospy.init_node('build_classifier')
    logger.info('after node...')
    rospy.Subscriber('/exercise/mode', Int32, signal_handler)

    MyoDemo2.pub_l = rospy.Publisher('/exercise/l/playback',
                                     Quaternion,
                                     queue_size=1)
    MyoDemo2.pub_u = rospy.Publisher('/exercise/u/playback',
                                     Quaternion,
                                     queue_size=1)

    print "Classifier launched. Listening to message..."