Example #1
def main(args):
    init_config()
    if not os.path.exists(args.file):
        print('File path is invalid')
        sys.exit(1)

    LinesRepository().process_file(args.file)
Example #2
def config_initialization():
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    #util.init_logger()
    print(FLAGS.batch_size)

    config.init_config(image_shape,
                       batch_size=FLAGS.batch_size,
                       weight_decay=FLAGS.weight_decay,
                       num_gpus=FLAGS.num_gpus,
                       train_with_ignored=FLAGS.train_with_ignored)

    batch_size_per_gpu = int(FLAGS.batch_size / FLAGS.num_gpus)

    tf.summary.scalar('batch_size', FLAGS.batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          FLAGS.dataset_dir)

    return dataset
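Most of the TensorFlow training snippets collected here assume a project-local config module whose init_config stores shared hyperparameters as module-level attributes that other files read back (config.batch_size, config.batch_size_per_gpu, and so on). The following is a minimal, hypothetical sketch of that interface only; the real pixel_link/seglink config modules also derive anchor and score-map shapes, which is omitted here.

# config.py -- hypothetical minimal stub of the interface the snippets above assume
import sys

_module = sys.modules[__name__]

def init_config(image_shape, batch_size=32, weight_decay=0.0005,
                num_gpus=1, **extra_options):
    """Store shared hyperparameters as attributes of this module."""
    _module.image_shape = image_shape
    _module.batch_size = batch_size
    _module.batch_size_per_gpu = batch_size // num_gpus
    _module.weight_decay = weight_decay
    _module.num_gpus = num_gpus
    # Thresholds, loss weights, etc. are stored unchanged.
    for name, value in extra_options.items():
        setattr(_module, name, value)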
Example #3
def main(wf):
    """Run when called as main script to generate all icons"""
    import os
    config.init_config(wf)
    start = time.time()
    log.debug('Starting icon generation at {}'.format(
              datetime.datetime.fromtimestamp(start)
              .strftime('%Y-%m-%d %H:%M:%S')))
    con = sqlite3.connect(config.DB_FILE)
    cursor = con.cursor()
    cursor.execute("""SELECT hex, icon FROM chars ORDER BY hex DESC""")
    codepoints = [t[0] for t in cursor.fetchall() if not
                  os.path.exists(os.path.join(config.ICON_DIR, t[1]))]

    generate_icons(codepoints,
                   config.ICON_DIR,
                   logfile=wf.logfile,
                   font=wf.settings.get('font'),
                   size=wf.settings.get('size'))

    stop = time.time()
    log.debug('Finished icon generation at {}'.format(
              datetime.datetime.fromtimestamp(stop)
              .strftime('%Y-%m-%d %H:%M:%S')))
    log.info('{} icons generated in {}'.format(
             len(codepoints),
             util.readable_time(stop - start)))
Example #4
async def on_ready():
    config.init_config(bot)
    print("Logged in as")
    print(bot.user.name)
    print(bot.user.id)
    print("------")
    scheduling.init_scheduler()
Example #5
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)

    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_pixel_link_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir + '-' + FLAGS.decoder,
                     stdout=False,
                     mode='a')

    config.load_config('./')
    config.init_config(image_shape,
                       batch_size=FLAGS.batch_size,
                       weight_decay=FLAGS.weight_decay,
                       num_gpus=FLAGS.num_gpus)

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu

    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('train_pixel_link_on' + '_' + FLAGS.dataset_name)
    if FLAGS.dataset_name == 'isbi2017v2':
        dataset_dir = '/home/give/Documents/dataset/ISBI2017/weakly_label_segmentation_V4/Batch_2/tfrecords_V2_V2'
    elif FLAGS.dataset_name == 'dsb2018':
        dataset_dir = '/home/give/Documents/dataset/data-science-bowl-2018/stage1_train_BW_tfrecords'
    else:
        assert False
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          dataset_dir)
    # config.print_config(FLAGS, dataset)
    return dataset
Example #6
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_seglink_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir, stdout=False, mode='a')
    
    # Init some config; no need to pay too much attention
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    #util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    # Step 1: create dataset (by xiaodong)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example #7
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)

    config.init_config(
        image_shape,
        batch_size=1,
        seg_conf_threshold=FLAGS.seg_conf_threshold,
        link_conf_threshold=FLAGS.link_conf_threshold,
        train_with_ignored=FLAGS.train_with_ignored,
        seg_loc_loss_weight=FLAGS.seg_loc_loss_weight,
        link_cls_loss_weight=FLAGS.link_cls_loss_weight,
    )

    util.proc.set_proc_name('eval_' + FLAGS.model_name + '_' +
                            FLAGS.dataset_name)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset, print_to_file=False)

    return dataset
Example #8
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_pixel_link_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir,
                     stdout=False,
                     mode='a')

    config.load_config(FLAGS.train_dir)

    config.init_config(image_shape,
                       batch_size=FLAGS.batch_size,
                       weight_decay=FLAGS.weight_decay,
                       num_gpus=FLAGS.num_gpus)

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu

    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('train_pixel_link_on' + '_' + FLAGS.dataset_name)

    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example #9
def main(wf):
    """Run when called as main script to generate all icons"""
    import os
    config.init_config(wf)
    start = time.time()
    log.debug('Starting icon generation at {}'.format(
        datetime.datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M:%S')))
    con = sqlite3.connect(config.DB_FILE)
    cursor = con.cursor()
    cursor.execute("""SELECT hex, icon FROM chars ORDER BY hex DESC""")
    codepoints = [
        t[0] for t in cursor.fetchall()
        if not os.path.exists(os.path.join(config.ICON_DIR, t[1]))
    ]

    generate_icons(codepoints,
                   config.ICON_DIR,
                   logfile=wf.logfile,
                   font=wf.settings.get('font'),
                   size=wf.settings.get('size'))

    stop = time.time()
    log.debug('Finished icon generation at {}'.format(
        datetime.datetime.fromtimestamp(stop).strftime('%Y-%m-%d %H:%M:%S')))
    log.info('{} icons generated in {}'.format(
        len(codepoints), util.readable_time(stop - start)))
Example #10
def get_follow_list():
	config.init_config()
	config.init_app_config()
	oauth1 = get_oauth(config.CONSUMER_KEY,config.CONSUMER_SECRET,config.OAUTH_TOKEN,config.OAUTH_TOKEN_SECRET)
	oauth2 = get_oauth(config.CONSUMER_KEY,config.CONSUMER_SECRET,config.OAUTH_TOKEN1,config.OAUTH_TOKEN_SECRET1)

	oauth_list = [oauth1,oauth2]
	

	users_to_crawl = loadJsonObject('users_to_crawl.txt')
	user_follow_graph = {}
	trial = 0
	for user in users_to_crawl:
		user_id = user[0]
		screen_name = user[1]
		follow_list = get_friend_list(user_id,screen_name,oauth_list[trial],5000)
		trial = (trial + 1)%len(oauth_list)
		user_follow_graph[user_id] = follow_list
		single_user_data = (user_id,follow_list)
		single_user_data_json = json.dumps(single_user_data)
		with open('user_follow_graph_backup.txt','ab') as fp:
			fp.write('*******************\n')
			fp.write(single_user_data_json)
			fp.write('\n')
	saveAsJson(user_follow_graph,'user_follow_graph.txt')
Example #11
    def __init__(self, mode: str, config_path: str = r'..\set_locations.ini'):

        if mode not in available_modes:
            logger.error(
                f"""Eye movements Classifier mode should be one from: {available_modes}.
                         Given type {mode} is unrecognized.""")
            raise NotImplementedError

        self._mode = mode
        self._algorithm_name = None
        self._algorithm = None
        self._model_params = {}

        self._estimator = EyemovementsEstimator(
            [metric() for metric in all_metrics_list])

        # If config is not pre-initialized
        if len(config.sections()) == 0:
            # Read config and init config here
            if Path(config_path).exists():
                init_config(config_path)
            else:
                logger.error(
                    f"No pre-initialized config given and no configuration file found at {config_path}."
                )
                raise FileNotFoundError

        self.__init_algorithm()
Example #12
async def on_ready():
    config.init_config(bot)
    print("Logged in as")
    print(bot.user.name)
    print(bot.user.id)
    print("------")

    scheduling.init_scheduler()

    # Schedule tasks
    # Since on_ready may run multiple times due to reconnects, we need to make
    # sure we only schedule jobs once
    global scheduler_initialized
    if not scheduler_initialized:
        scheduler_initialized = True

        if len(config.squadservers) > 0:
            scheduling.interval_execute(
                update_squad_messages, [],
                interval_seconds=config.UPDATE_INTERVAL_SECONDS)
            await update_squad_messages()
        else:
            print(
                "No Squad servers configured. Skipping Squad status updating.")

        if len(config.postservers) > 0:
            scheduling.interval_execute(
                update_post_messages, [],
                interval_seconds=config.UPDATE_INTERVAL_SECONDS)
            await update_post_messages()
        else:
            print(
                "No Post Scriptum servers configured. Skipping Post Scriptum status updating."
            )
Example #13
def config_initialization():
    # image shape and feature layers shape inference
    config.default_config()
    image_shape = (config.train_image_height, config.train_image_width)

    if not config.dataset_path:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file='log_train_pixel_link_%d_%d.log' % image_shape,
        log_path=config.train_dir, stdout=False, mode='a')

    # config.load_config(config.train_dir)
    config.init_config(image_shape,
                       batch_size=config.batch_size,
                       weight_decay=config.weight_decay,
                       num_gpus=config.num_gpus
                       )
    config.default_config()
    config.score_map_shape = (config.train_image_height // config.strides[0],
                              config.train_image_width // config.strides[0])
    height = config.train_image_height
    score_map = config.score_map_shape
    stride = config.strides[0]
    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu

    util.proc.set_proc_name('train_pixel_link_on' + '_' + config.dataset_name)
Example #14
def config_initialization():
    image_shape = (FLAGS.image_height, FLAGS.image_width)

    config.init_config(image_shape,
                       batch_size=1,
                       pixel_conf_threshold=FLAGS.pixel_conf_threshold,
                       link_conf_threshold=FLAGS.link_conf_threshold)
Example #15
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_seglink_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir, stdout=False, mode='a')
    
    
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example #16
def create_app():
    app = Flask(__name__)

    from config import init_config
    init_config(app)

    # from sentry import init_sentry
    # init_sentry(app)

    from model import init_db
    init_db(app)

    from controller import init_blueprint
    init_blueprint(app)
    #
    # if False and app.config.get('CRON_STATE') is True:
    #     from cron import init_cron
    #     init_cron(app)

    from utils import init_util
    init_util(app)

    # from utils.kafka import KafkaClient
    # KafkaClient.init_kafka()
    #
    # from redisdb import init_redis
    # init_redis(app)

    return app
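In this Flask application-factory example, init_config(app) presumably loads settings into app.config before the database, blueprints, and utilities are wired up. A hypothetical minimal version is sketched below; the module layout and setting names are assumptions, not taken from the project, which may read YAML/ini files instead.

# config.py -- hypothetical sketch of init_config(app) for an app factory
import os

class DefaultConfig:
    DEBUG = False
    CRON_STATE = False
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URI', 'sqlite:///app.db')

def init_config(app):
    """Populate app.config from defaults, then optional local overrides."""
    app.config.from_object(DefaultConfig)
    # A local settings.cfg next to the app may override the defaults if present.
    app.config.from_pyfile('settings.cfg', silent=True)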
Example #17
 def douban_login(self):
     '''Log in to douban.fm and get the token'''
     if os.path.exists(config.PATH_TOKEN):
         # Already logged in
         logger.info("Found existing Douban.fm token.")
         with open(config.PATH_TOKEN, 'r') as f:
             self.login_data = pickle.load(f)
             self.token = self.login_data['token']
             self.user_name = self.login_data['user_name']
             self.user_id = self.login_data['user_id']
             self.expire = self.login_data['expire']
             self.default_volume = int(self.login_data['volume'])\
                 if 'volume' in self.login_data else 50
             # Value stored in login_data in token file is line number
             # instead of channel_id! Will do set_channel later.
             self.default_channel = int(self.login_data['channel'])\
                 if 'channel' in self.login_data else 0
         print '\033[31m♥\033[0m Get local token - Username: \033[33m%s\033[0m' %\
             self.user_name
     else:
         # Not logged in yet
         logger.info('First time logging in Douban.fm.')
         while True:
             self.email, self.password = self.win_login()
             login_data = {
                 'app_name': 'radio_desktop_win',
                 'version': '100',
                 'email': self.email,
                 'password': self.password
             }
             s = requests.post('http://www.douban.com/j/app/login', login_data)
             dic = json.loads(s.text, object_hook=_decode_dict)
             if dic['r'] == 1:
                 logger.debug(dic['err'])
                 continue
             else:
                 self.token = dic['token']
                 self.user_name = dic['user_name']
                 self.user_id = dic['user_id']
                 self.expire = dic['expire']
                 self.default_volume = 50
                 self.default_channel = 1
                 self.login_data = {
                     'app_name': 'radio_desktop_win',
                     'version': '100',
                     'user_id': self.user_id,
                     'expire': self.expire,
                     'token': self.token,
                     'user_name': self.user_name,
                     'volume': '50',
                     'channel': '0'
                 }
                 logger.info('Logged in username: ' + self.user_name)
                 with open(config.PATH_TOKEN, 'w') as f:
                     pickle.dump(self.login_data, f)
                     logger.debug('Write data to ' + config.PATH_TOKEN)
                 break
     # set config
     config.init_config()
Example #18
def main():
    init_config()
    options.parse_config_file("webssh.conf")

    http_server = tornado.httpserver.HTTPServer(Application(), xheaders=True)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
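The webssh-style main() functions in this and several later examples pair a local init_config() with options.parse_config_file, which in Tornado only works for options that were previously declared with tornado.options.define. A hypothetical minimal init_config() consistent with these snippets (the option names beyond port are assumptions) might be:

# hypothetical sketch of the init_config() these Tornado snippets appear to assume
from tornado.options import define

def init_config():
    """Declare the options that webssh.conf is expected to set."""
    define("port", default=8888, type=int, help="port to listen on")
    define("address", default="0.0.0.0", help="interface to bind")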
Example #19
    def __init__(self, method_name="runTest"):
        super().__init__(method_name)
        self._current_base_path = Path(__file__).parent.parent.resolve()
        init_config(str(self._current_base_path / "set_locations.ini"))

        ds = TrainDataset(ds_path=config.get('DataPaths', 'run_data'))
        logger.info(f"Found unique users: {len(ds.get_users())}")
        self.train_dataset = ds.create_dataset()
Example #20
 def __init__(self, config_path: str, **kwargs):
     self._config_path = config_path
     init_config(config_path)
     self.__available_modes = ['train', 'run']
     self._model = None
     self._trainer = Trainer()
     self._eyemovements_classifier = EyemovementsClassifier(mode=kwargs.get('mode', 'calibrate'),
                                                            config_path=config_path)
Example #21
def main():
    init_config()
    options.parse_config_file("/var/www/app/webssh.conf")

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #22
def main():
    config.init_config()
    dbmanager.connect_to_database()
    # dbmanager.create_temp_roads_table()
    # dbmanager.insert_target_data_to_temp_roads_table()

    # load data from database, include road data and their intersection points.
    original_road_string_data = dbmanager.query_temp_roads()
    # intersection_point_data = dbmanager.query_total_intersection_points()

    # get complete road data from original road data.
    # complete_road_string_data must be checked on QGIS.
    complete_road_string_data = get_complete_road_string_list(
        original_road_string_data)

    # just for check
    update_road_code_to_database(complete_road_string_data)

    # get all CROSS, TOUCH, END type points
    # points data must be checked on QGIS.

    # get CROSS type point
    cross_points = get_each_road_cross_points(complete_road_string_data)

    # get TOUCH type point
    touch_points = get_each_road_touch_points(complete_road_string_data,
                                              cross_points)

    # get END type point
    end_points = get_each_road_end_points(complete_road_string_data,
                                          touch_points)

    # check points
    insert_type_points_to_database(cross_points, touch_points, end_points)

    # get distance between two points
    roads_distance_map = calculate_roads_distance(complete_road_string_data)

    type_points_dict_by_road_id = dict_by_road_id(complete_road_string_data,
                                                  cross_points, touch_points,
                                                  end_points)

    # calculate each point's height.
    key_points_z_value = calculate(original_road_string_data,
                                   type_points_dict_by_road_id,
                                   roads_distance_map, cross_points,
                                   touch_points, end_points)

    # interpolate each complete road.

    # smooth each complete road in its z axis.
    smooth_z_axis(complete_road_string_data, key_points_z_value)

    # save road data with z value to database or file.
    output_data_to_file(complete_road_string_data)

    # disconnect from the database.
    dbmanager.disconnect_to_database()
Example #23
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
    tf.logging.set_verbosity(tf.logging.DEBUG)
    config.init_config(image_shape,
                       batch_size=1,
                       num_gpus=1,
                       pixel_conf_threshold=0.5,
                       link_conf_threshold=0.5)
Example #24
 def _config_initialization(self):
     image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
     config.load_config(FLAGS.checkpoint_path)
     config.init_config(image_shape,
                        batch_size=1,
                        pixel_conf_threshold=0.9,
                        link_conf_threshold=0.1,
                        num_gpus=1,
                        )
Example #25
def main():
    init_config()
    # options.parse_config_file("webssh.conf")

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(8888)
    welcome(8888)
    tornado.ioloop.IOLoop.instance().start()
Example #26
 def connect(cls, dbname):
     db = None
     config.init_config()
     options.parse_config_file('pacado.conf')
     if dbname == 'meta':
         db = MongoClient(options.mongo_meta_uri)['metadata']
     if dbname == 'cnyb':
         db = MongoClient(options.mongo_cnyb_uri)['cnyb']
     return db
Example #27
def main():
    asyncio.set_event_loop(asyncio.new_event_loop())
    init_config()
    options.parse_config_file("webssh.conf")

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    welcome(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #28
def main():
    init_config()
    options.parse_config_file("webssh.conf")
    app = tornado.web.Application([
        (r"/ws", handlers.WSHandler),
    ])
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #29
File: main.py Project: uevol/devops
def main():
    init_config()
    options.parse_config_file(
        os.path.join(os.path.dirname(__file__), "webssh.conf"))

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    IOLoop.instance().start()
    welcome(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #30
    def __init__(self, method_name="runTest"):
        logger.info(f"Testing Eye movements Module started")
        super().__init__(method_name)
        init_config("../set_locations.ini")

        self.train_dataset = TrainDataset(config.get(
            "DataPaths", "run_data"), ).create_dataset()
        logger.info(f"Shape of loaded data: {self.train_dataset.shape}")
        logger.info(f"Unique users: {self.train_dataset['user_id'].nunique()}")
        logger.info(
            f"Unique sessions: {self.train_dataset['session_id'].nunique()}")
Example #31
def tweet_tester():
	config.init_config()
	config.init_app_config()
	oauth1 = get_oauth(config.CONSUMER_KEY,config.CONSUMER_SECRET,config.OAUTH_TOKEN,config.OAUTH_TOKEN_SECRET)
	oauth2 = get_oauth(config.CONSUMER_KEY,config.CONSUMER_SECRET,config.OAUTH_TOKEN1,config.OAUTH_TOKEN_SECRET1)

	# get100TweetsbeforethisTweetID('ultron',592478218063638528,594616538205802497,oauth)

	max_id = 594652545894764544
	since_id = 592478218063638528
	reset_file(config.TWEET_STORAGE_SHEET)
	get_all_tweets_by_id('ultron',since_id,max_id,[oauth1,oauth2],config.TWEET_STORAGE_SHEET)
Example #32
def main():
    config.init_config(parse_command_line())
    config.basic_conf_check()
    log.set_log_level('app_log', logging.DEBUG)
    app = init_app()
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.bind(config.server_port)
    http_server.start(config.process_num)
    tornado.ioloop.IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
    print(('-' * 20, 'api_test_svr start', '-' * 20))
    # asyncio.get_event_loop().run_forever()
    tornado.ioloop.IOLoop.current().start()
Example #33
File: main.py Project: Sandbar/GA
def ga_maker4():
    try:
        if request.method == 'GET':
            config.init_config()  ### config file; remove before uploading
            sg = standard_genetic.StandardGenetic()
            sg.main()
            print('OK')
            return "OK "
    except Exception as e:
        print(str(e))
        log_maker.logger.info(str(e))
        return "except"
Example #34
def main():
	if len(sys.argv) > 2:
		from config import init_config
		init_config(sys.argv[1])
		from config import CONFIG
		from logger import CustomLogger
		cust_logger = CustomLogger(CONFIG.web_server.logger_name)
		cust_logger.add_file("log/"+CONFIG.web_server.logger_name, False)
		import app
		if bool(int(sys.argv[2])) == True:
			app.main()
Example #35
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    
    config.init_config(image_shape, batch_size = 1, seg_conf_threshold = FLAGS.seg_conf_threshold,
                       link_conf_threshold = FLAGS.link_conf_threshold)

    util.proc.set_proc_name('test' + FLAGS.model_name)
Example #36
def main():
    init_config()
    options.parse_config_file("webssh.conf")
    signal.signal(signal.SIGINT,f)

    wxchat =threading.Thread(target=wechatAction)
    wxchat.start()

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    welcome(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #37
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)

    config.init_config(image_shape, batch_size=1, seg_conf_threshold=FLAGS.seg_conf_threshold,
                       link_conf_threshold=FLAGS.link_conf_threshold)

    util.proc.set_proc_name('test' + FLAGS.model_name)
Example #38
def config_initialization():
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    config.init_config(image_shape, batch_size = FLAGS.batch_size)

    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
#     config.print_config(FLAGS, dataset)
    return dataset
Example #39
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    
    tf.logging.set_verbosity(tf.logging.DEBUG)
    config.load_config(FLAGS.checkpoint_path)
    config.init_config(image_shape, 
                       batch_size = 1, 
                       pixel_conf_threshold = 0.8,
                       link_conf_threshold = 0.8,
                       num_gpus = 1, 
                   )
    
    util.proc.set_proc_name('test_pixel_link_on'+ '_' + FLAGS.dataset_name)
Example #40
def main():
    config.init_config()
    print('go to model')
    print '*' * 80
    _log_file = open(config.LOG_FILE_PRE + time.strftime("_%Y_%m_%d_%H%M%S", time.localtime()), 'w')
    # log configuration.
    config.log_config(_log_file)
    # initialize model
    weights_path = None
    if config.MODE == 2:
        if config.DATASET == 'WSJ0':
            weights_path = './_tmp_weights/ASAM_WSJ0_weight_00031.h5'
        elif config.DATASET == 'THCHS-30':
            weights_path = './_tmp_weights/ASAM_THCHS30_weight_00034.h5'
    dl4ss_model = nnet.NNet(_log_file, weights_path)
    if config.MODE == 1:
        print 'Start to train model ...'
        _log_file.write('Start to train model ...\n')
        dl4ss_model.train()

    print 'valid spk number: 2'
    _log_file.write('valid spk number: 2\n')
    dl4ss_model.predict(config.VALID_LIST, spk_num=2)
    print 'test spk number: 2'
    _log_file.write('test spk number: 2\n')
    dl4ss_model.predict(config.TEST_LIST, spk_num=2)
    print 'test spk number: 3'
    _log_file.write('test spk number: 3\n')
    dl4ss_model.predict(config.TEST_LIST, spk_num=3)
    print 'test spk number: 2 with bg noise'
    _log_file.write('test spk number: 2 with bg noise\n')
    dl4ss_model.predict(config.TEST_LIST, spk_num=2, add_bgd_noise=True)

    for supp_time in [0.25, 0.5, 1, 2, 4, 8, 16, 32]:
        print 'unk spk and supplemental wav span: %02d' % supp_time
        _log_file.write('unk spk and supplemental wav span: %02d\n' % supp_time)
        dl4ss_model.predict(config.UNK_LIST, spk_num=2, unk_spk=True, supp_time=supp_time)
    else:
        print 'Wrong mode: %s' % config.MODE
        _log_file.write('Wrong mode: %s\n' % config.MODE)
    _log_file.close()
Example #41
def execute():
    parser = argparse.ArgumentParser(
        description="Gito - Command Line Utility to Print "
                    "TODO`s in source files & upload to wunderlist")
    parser.add_argument('-w', '--wsync', help='Sync the TODO`s to wunderlist',
                        action="store_true")
    parser.add_argument('-d', '--display', help='Display all the TODO`s',
                        action="store_true")
    parser.add_argument('-sd', '--safedisplay',
                        help='Display all the TODO`s without ASCII color encoding',
                        action="store_true")
    parser.add_argument('-i', '--initconfig',
                        help='Init Gito Configuration file under home',
                        action="store_true")
    parser.add_argument('-v', '--version',
                        help='Prints version',
                        action="store_true")
    args = parser.parse_args()

    if args.wsync:
        # Running validations
        validation.wunderlist_validation()

        # Starting wunderlist upload
        print 'Uploading tasks to Wunderlist:'
        todo_ob = todo.GiTo()
        todo_ob.upload_todos("default")

    elif args.display:
        todo_ob = todo.GiTo()
        todo_ob.print_todo("default")
    elif args.safedisplay:
        todo_ob = todo.GiTo()
        todo_ob.print_todo("safe")
    elif args.initconfig:
        config.init_config()
    elif args.version:
        print "2.0"
    else:
        parser.print_help()
Example #42
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    
    config.init_config(image_shape, 
                       batch_size = 1, 
                       seg_conf_threshold = FLAGS.seg_conf_threshold,
                       link_conf_threshold = FLAGS.link_conf_threshold, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )
        
    
    util.proc.set_proc_name('eval_' + FLAGS.model_name + '_' + FLAGS.dataset_name )
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset, print_to_file = False)
    
    return dataset
Example #43
def main():
	misc.mkdir_p("data/site/%s/%s/" % (config.wiki_lang, config.start_cat))
	misc.mkdir_p("data/pages/%s/%s/" % (config.wiki_lang, config.start_cat))
	misc.mkdir_p("data/speling/%s/%s/" % (config.wiki_lang, config.start_cat))

	config.init_config()

	# stage 1 - obtaining list of subcategories
	print("** Stage 1: Obtaining list of subcategories to crawl. **")
	subcats = crawler.crawl_subcats()

	# stage 2 - obtaining list of pages
	print("** Stage 2: Obtaining list of pages to crawl. **")
	pages = crawler.crawl_pages(subcats)

	# stage 3 - crawling all pages
	print("** Stage 3: Crawling all pages in list. **")
	crawler.crawl_all_pages(pages)

	# stage 4 - parsing (scraping) all pages
	print("** Stage 4: Parsing all pages in list. **")
	spelings = parser.parse(pages)

	# stage 5 - write to file
	print("** Stage 5: Writing final results to file. **")
	f = open("data/speling.txt", "w")
	for speling in spelings:
		f.write(speling + "\n")
	f.close()

	print("")
	print("")
	print("=== STATS ===")
	print("Crawled %d pages" % len(pages))
	print("Obtained %d spelings" % len(spelings))
	print("=============")
Example #44
def init():
    global status, app_config, darkice_config

    lcd_display.info("...............\n...............")
    lcd_display.info("server\nstarting up")
    status = []

    lcd_display.info("loading\nconfig file")
    app_config_file = open(path.join('config', 'pi_stream.ini'))
    app_config.clear()
    app_config, app_config_parser_errors = parse_app_config(app_config_file)
    app_config_file.close()

    if len(app_config_parser_errors) != 0:
        status.append(app_config_parser_errors)

    if len(app_config) != 0:  # no errors
        lcd_display.info("loading default/\ndarkice config")
        darkice_config_file = open(app_config['defaultConfig'])
        darkice_config, darkice_config_parser_errors = config.init_config(darkice_config_file)
        darkice_config_file.close()

        if len(darkice_config_parser_errors) != 0:
            status.append(darkice_config_parser_errors)

    lcd_display.info("running\nstatus tests")
    status = run_all_tests()
    lcd_display.start_process()
    try:
        ip_address = get_ip_address('eth0')
        lcd_display.put(u"Reach me at\n{}".format(ip_address), lcd_display.GOOD)
    except Exception as e:
        pass  # don't do anything; there will be an error in the queue anyway
    for item in status:
        if item['result'] is TestStatus.Error:
            lcd_display.put(item['lcd_message'], lcd_display.ERROR)
        if item['result'] is TestStatus.Attention:
            lcd_display.put(item['lcd_message'], lcd_display.INFO)
        pass
Example #45
		self.theInitializer =  signalManager.manager(self.theWindow)   
		    

if __name__=="__main__":
	
	app = QApplication(sys.argv)

	if len(sys.argv) > 2:
		print("Too many arguments - Exiting")
		sys.exit(1)
	elif len(sys.argv) == 2:
		if sys.argv[1] != "--debug":
			print("Unknown argument - Exiting")
			sys.exit(1)
		else:
			config.init_config(True)
	else:
		config.init_config()

	langmodule.setuplang()

	if not utilities.isAVGDRunning():
		QMessageBox.critical(None, langmodule.attention, langmodule.avgnotrunning, 
				QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)
		#print(langmodule.avgnotrunning)
		exit(1)

	signal.signal(signal.SIGALRM, avgstopped_handler)
	mainpid = os.getpid()

	theApp = theApplication()
Example #46
message_pid = None


def sigterm(signum, frame):
    global service_pid
    global message_pid

    if service_pid is not None:
        Logger.common_logger.info('Stopping Service')
        os.kill(int(service_pid), signal.SIGTERM)

    if message_pid is not None:
        Logger.common_logger.info('Stopping Telehash')
        os.kill(int(message_pid), signal.SIGTERM)
    sys.exit(1)

if __name__ == '__main__':
    init_config()
    init_logger()

    Logger.common_logger.info('Starting DESHIP Platform')

    service_pid = service_server.run_service()
    message_pid = TelehashServer().run()

    signal.signal(signal.SIGTERM, sigterm)
    signal.signal(signal.SIGINT, sigterm)

    while True:
        time.sleep(1)
Example #47
    parser = argparse.ArgumentParser(description='F-Secure http checker utility')
    subparsers = parser.add_subparsers()

    config_file_parser = subparsers.add_parser('config')
    config_file_parser.add_argument('--config', action='store', help='config file', required=True)

    options_parser = subparsers.add_parser('options')
    options_parser.add_argument('--datafile', action='store', help='data file containing list of urls', required=True)
    options_parser.add_argument('--period', action='store', type=int, help='check period in seconds', default=30)
    options_parser.add_argument('--logfile', action='store', help='log file', default='output.log')
    options_parser.add_argument('--host', action='store', help='host to listen', default='127.0.0.1')
    options_parser.add_argument('--port', action='store', type=int, help='port to listen', default=8080)

    args = vars(parser.parse_args())
    if not args:
        parser.print_help()

    # # assuming config file has all required options

    init_config(args.get('config', args))
    setup_logging()

    data = []
    last = {}

    with open(config.datafile) as f:
        for line in f:
            data.append(line.rstrip().split('|'))

    Worker().run(data, last)

    print('\nthe end')
Example #48
    def douban_login(self):
        '''Log in to douban.fm and get the cookie and token'''
        if os.path.exists(config.PATH_TOKEN) and os.path.exists(config.PATH_COOKIE) and not self.update:
            # Already logged in
            logger.info("Found existing douban.fm cookie.")
            with open(config.PATH_COOKIE, 'r') as f:
                self.cookie = pickle.load(f)
            logger.info("Found existing douban.fm token.")
            with open(config.PATH_TOKEN, 'r') as f:
                self.login_data = pickle.load(f)
                self.token = self.login_data['token']
                self.user_name = self.login_data['user_name']
                self.user_id = self.login_data['user_id']
                self.expire = self.login_data['expire']
                self.default_volume = int(self.login_data['volume'])\
                    if 'volume' in self.login_data else 50
                # Value stored in login_data in token file is line number
                # instead of channel_id! Will do set_channel later.
                self.default_channel = int(self.login_data['channel'])\
                    if 'channel' in self.login_data else 0
            print '\033[31m♥\033[0m Get local token - Username: \033[33m%s\033[0m' %\
                self.user_name
            logger.info("Updating played record.")
            resp = requests.get('http://douban.fm', cookies=self.cookie)
            try:
                self.played = re.findall('rec_played">(\d+?)<', resp.text)[0]
            except IndexError:
                sys.exit('发现一个错误哦(>0<)...试试重新登录咯?')
            self.liked = re.findall('rec_liked">(\d+?)<', resp.text)[0]
            self.faved = re.findall('faved">(\d+?)<', resp.text)[0]
        else:
            # Not logged in yet
            logger.info('First time logging in douban.fm.')
            bid = '"' + ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11)) + '"'
            while True:
                self.email = raw_input('Email: ')
                self.password = getpass.getpass('Password: ')
                bid = '"' + ''.join(random.choice(string.ascii_letters + string.digits) for x in range(11)) + '"'
                # Get the cookie
                form = self.build_login_form(bid)
                resp = requests.post('http://douban.fm/j/login', data=form['data'], headers=form['headers'], cookies={'bid': bid})
                dic = json.loads(resp.text, object_hook=_decode_dict)
                if dic['r'] == 1:
                    logger.debug(dic['err_msg'])
                    continue
                else:
                    self.cookie = {'bid': bid, 'dbcl2': resp.cookies['dbcl2'], 'ck': dic['user_info']['ck'], 'fmNlogin': '******'}
                    logger.info('Get cookie successfully!')
                    with open(config.PATH_COOKIE, 'w') as f:
                        pickle.dump(self.cookie, f)
                        logger.debug('Write data to ' + config.PATH_COOKIE)
                    logger.info("Updating played record.")
                    play_record = dic['user_info']['play_record']
                    self.played = str(play_record['played'])
                    self.liked = str(play_record['liked'])
                    self.faved = str(play_record['fav_chls_count'] - 1)

                # Get the token
                login_data = {
                    'app_name': 'radio_desktop_win',
                    'version': '100',
                    'email': self.email,
                    'password': self.password
                }
                resp = requests.post('http://www.douban.com/j/app/login', login_data, cookies={'bid': bid})
                dic = json.loads(resp.text, object_hook=_decode_dict)
                if dic['r'] == 1:
                    logger.debug(dic['err'])
                    continue
                else:
                    self.token = dic['token']
                    self.user_name = dic['user_name']
                    self.user_id = dic['user_id']
                    self.expire = dic['expire']
                    self.default_volume = 50
                    self.default_channel = 1
                    self.login_data = {
                        'app_name': 'radio_desktop_win',
                        'version': '100',
                        'user_id': self.user_id,
                        'expire': self.expire,
                        'token': self.token,
                        'user_name': self.user_name,
                        'volume': '50',
                        'channel': '0',
                    }
                    logger.info('Get token successfully!')
                    with open(config.PATH_TOKEN, 'w') as f:
                        pickle.dump(self.login_data, f)
                        logger.debug('Write data to ' + config.PATH_TOKEN)
                break
        # set config
        config.init_config()
Example #49
import numpy as np
import ml_metrics as metrics
from tqdm import tqdm

from report import CSVReport, ExcelReport
from config import init_config

from uuid import uuid4

logging.basicConfig(level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s')

#logging = logging.getlogging(__name__)
#logging.setLevel(level=logging.DEBUG)

cfg = init_config('config.json')
logging.debug("Application was launched with config: %s" % str(cfg.init_dict))


def get_split_date(df, split_event, train_ratio=0.8):
    """Calculates split date

    Calculates the moment of time that we will use to split
    data into the train (before the moment) and the test sets

    Args:
        df: Spark DataFrame
        train_ratio: ratio of samples in train set

    Returns:
        A datetime object
Example #50
            # mix_input_spec (batch_size, time_steps, spectrum_dim)
            mix_input_spec = np.array(batch_input_mix_spec).reshape((batch_size, ) + spec_mix.shape)
            # target_input_spk (batch_size, 1)
            target_input_spk = np.array(batch_input_spk, dtype=np.int32).reshape((batch_size, 1))
            # clean_input_fea (batch_size, time_steps, feature_dim)
            clean_input_fea = np.array(batch_input_clean_fea).reshape((batch_size, ) + feature_inp_clean.shape)
            # clean_target_spec (batch_size, time_steps, spectrum_dim)
            clean_target_spec = np.array(batch_target_spec).reshape((batch_size, ) + spec_clean.shape)

            yield ({'input_mix_feature': mix_input_fea, 'input_mix_spectrum': mix_input_spec,
                    'input_target_spk': target_input_spk, 'input_clean_feature': clean_input_fea},
                   {'target_clean_spectrum': clean_target_spec})
            batch_input_mix_fea = []
            batch_input_mix_spec = []
            batch_input_spk = []
            batch_input_clean_fea = []
            batch_target_spec = []
            batch_input_len = []
            batch_count = 0

if __name__ == "__main__":
    config.init_config()
    spk_to_idx, idx_to_spk = get_idx(config.TRAIN_LIST, config.VALID_LIST, config.TEST_LIST)
    x, y = next(get_feature(config.TRAIN_LIST, spk_to_idx, min_mix=config.MIN_MIX, max_mix=config.MAX_MIX,
                            batch_size=config.BATCH_SIZE))
    print (x['input_mix_feature'].shape)
    print (x['input_mix_spectrum'].shape)
    print (x['input_target_spk'].shape)
    print (x['input_clean_feature'].shape)
    print (y['target_clean_spectrum'].shape)
Example #51
def main(wf):
    config.init_config(wf)
    # Add custom magic arguments
    wf.magic_arguments['genicons'] = generate_all_icons
    wf.magic_arguments['delicons'] = clear_all_icons
    # Icons that don't yet exist
    icons_to_generate = set()
    query = None
    if len(wf.args):
        query = wf.args[0]

    # Back up
    if query.endswith(config.DELIMITER):
        query = wf.cached_data('last-query', max_age=0)
        if query:
            query = '{} {}'.format(config.KEYWORD, query)
        else:
            query = '{} '.format(config.KEYWORD)
        run_alfred(query)
        return

    if config.DELIMITER in query:  # Show character details

        name, query = [s.strip() for s in query.split(config.DELIMITER)]
        log.info('Details for character `{}`'.format(name))
        info = charinfo(name)

        if not info:
            wf.add_item('Unknown character', name, icon=ICON_WARNING)

        else:

            output = []
            icon = os.path.join(config.ICON_DIR, info['icon'])
            if not os.path.exists(icon):
                icons_to_generate.add(info['codepoint'])

            # Compile codepoint data
            for key in config.display_keys:
                name = config.key_name_map[key]
                value = info[key]
                if not value:
                    continue
                output.append((name, value))

            if query:
                output = wf.filter(query, output, lambda t: t[0], min_score=40)

            if not output:
                wf.add_item('No matching representations', icon=ICON_WARNING)

            for (name, value) in output:
                wf.add_item(
                    value,
                    name,
                    valid=True,
                    copytext=value,
                    largetext=value,
                    arg=value,
                    icon=icon,
                )
        # Generate icons if need be
        generate_icons(icons_to_generate,
                       config.ICON_DIR,
                       logfile=wf.logfile,
                       font=wf.settings.get('font'),
                       size=wf.settings.get('size'))
        wf.send_feedback()
        return

    # Plain query (no delimiter), so search Unicode codepoints

    # Cache user-entered query, so we can jump back to it later
    wf.cache_data('last-query', query)

    start = time.time()
    con = sqlite3.connect(config.DB_FILE)
    con.create_function('rank', 1, make_rank_func((1.0, 0, 1.5, 0)))
    cursor = con.cursor()
    results = search(query, cursor)
    if not results:
        results = search(query + '*', cursor)
        if not results:
            results = search('*{}*'.format(query), cursor)

    if not results:
        wf.add_item('No matches', 'Try a different query', icon=ICON_WARNING)

    log.info('{:d} results for `{}` in {:0.3f} seconds'.format(
             len(results), query, time.time() - start))

    for (name, h, entity, icon) in results:
        icon = os.path.join(config.ICON_DIR, icon)
        if not os.path.exists(icon):
            icons_to_generate.add(h)
            # icon = os.path.join('icons', 'unknown.png')
        subtitle = 'U+{}'.format(h)
        wf.add_item(name,
                    subtitle,
                    autocomplete='{} {} '.format(name, config.DELIMITER),
                    copytext=subtitle,
                    arg=icon,
                    type='file',
                    icon=icon)

    # Generate icons if need be
    generate_icons(icons_to_generate,
                   config.ICON_DIR,
                   logfile=wf.logfile,
                   font=wf.settings.get('font'),
                   size=wf.settings.get('size'))
    wf.send_feedback()
Example #52
File: app.py Project: CMGS/jagare
#!/usr/bin/python
#coding:utf-8

import logging
import config
config.init_config('config.yaml', 'local_config.yaml')

from flask import Flask
from views import init_view
from libs.colorlog import ColorizingStreamHandler

app = Flask(__name__)
app.debug = config.DEBUG
app.config['UPLOAD_FOLDER'] = config.UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = config.MAX_CONTENT_LENGTH

logging.StreamHandler = ColorizingStreamHandler
logging.BASIC_FORMAT = "%(asctime)s [%(name)s] %(message)s"
logging.basicConfig(level=logging.DEBUG if config.DEBUG else logging.INFO)
logger = logging.getLogger(__name__)

init_view(app)

@app.route('/')
def index():
    '''
Jagare home page: returns a short description and the currently running version.
    '''
    return "This is Jägare, the git backend server of titan."

Example #53
    def douban_login(self):
        '''Log in to douban.fm and get the token'''
        path_token = os.path.expanduser('~/.douban_token.txt')
        if os.path.exists(path_token):
            # Already logged in
            logger.info("Found existing Douban.fm token.")
            with open(path_token, 'r') as f:
                self.login_data = pickle.load(f)
                self.token = self.login_data['token']
                self.user_name = self.login_data['user_name']
                self.user_id = self.login_data['user_id']
                self.expire = self.login_data['expire']
                self.default_volume = int(self.login_data['volume'])\
                    if 'volume' in self.login_data else 50
                self.default_channel = int(self.login_data['channel'])\
                    if 'channel' in self.login_data else 1

                # The stored default_channel is a line number, not the real
                # channel_id sent to the server, so convert it here.
                self.set_channel(self.default_channel)
            print '\033[31m♥\033[0m Get local token - Username: \033[33m%s\033[0m' % self.user_name
        else:
            # Not logged in yet
            logger.info('First time logging in Douban.fm.')
            while True:
                self.email, self.password = self.win_login()
                login_data = {
                    'app_name': 'radio_desktop_win',
                    'version': '100',
                    'email': self.email,
                    'password': self.password
                }
                s = requests.post('http://www.douban.com/j/app/login', login_data)
                dic = json.loads(s.text, object_hook=_decode_dict)['song']
                if dic['r'] == 1:
                    logger.debug(dic['err'])
                    continue
                else:
                    self.token = dic['token']
                    self.user_name = dic['user_name']
                    self.user_id = dic['user_id']
                    self.expire = dic['expire']
                    self.default_volume = 50
                    self.default_channel = 1
                    self.login_data = {
                        'app_name': 'radio_desktop_win',
                        'version': '100',
                        'user_id': self.user_id,
                        'expire': self.expire,
                        'token': self.token,
                        'user_name': self.user_name,
                        'volume': '50',
                        'channel': '0'
                    }
                    logger.info('Logged in username: ' + self.user_name)
                    with open(path_token, 'w') as f:
                        pickle.dump(self.login_data, f)
                        logger.debug('Write data to ' + path_token)
                    break

        self.last_fm_username = \
            self.login_data['last_fm_username'] if 'last_fm_username' in self.login_data\
            else None
        self.last_fm_password = \
            self.login_data['last_fm_password'] if 'last_fm_password' in self.login_data\
            else None
        # last.fm login
        try:
            if sys.argv[1] == 'last.fm':
                from hashlib import md5
                username = raw_input('last.fm username: ')
                password = getpass.getpass('last.fm password: ')
                self.last_fm_password = md5(password).hexdigest()
                with open(path_token, 'r') as f:
                    data = pickle.load(f)
                with open(path_token, 'w') as f:
                    data['last_fm_username'] = username
                    data['last_fm_password'] = self.last_fm_password
                    pickle.dump(data, f)
        except IndexError:
            pass
        # set config
        config.init_config()