def __init__(self, cfg, environment, sess, model_dir, state='image',
             state_dim=3, log_file_pathname='/tmp/deeprl.log', verbosity=1,
             lr_policy=PolyDecayPolicy(0.001), start_epoch=1, resume_lr=0.001,
             n_iters_per_epoch=100, gpu_memory_fraction=0.9):
    self.cfg = cfg
    self.sess = sess
    self.weight_dir = 'weights'
    self.env = environment
    # Frame history and replay buffer share the same state layout.
    self.history = History(
        self.cfg, log=log, state=state, state_dim=state_dim)
    self.model_dir = model_dir
    self.memory = ExperienceBuffer(
        self.cfg, self.model_dir, log=log, state=state, state_dim=state_dim)
    self.learning_rate_minimum = 0.0001
    # The learning rate is fed each step so the decay policy can drive it.
    self.learning_rate = tf.placeholder(
        tf.float32, shape=[], name="learning_rate_placeholder")
    self.lr_policy = lr_policy
    self.lr_policy.start_epoch = start_epoch
    self.lr_policy.base_lr = resume_lr
    self.lr_policy.n_iters_per_epoch = n_iters_per_epoch
    self.gpu_memory_fraction = gpu_memory_fraction
    log.setFileHandler(log_file_pathname)
    log.setVerbosity(str(verbosity))
    # Global step counter, assignable from outside the graph.
    with tf.variable_scope('step'):
        self.step_op = tf.Variable(0, trainable=False, name='step')
        self.step_input = tf.placeholder('int32', None, name='step_input')
        self.step_assign_op = self.step_op.assign(self.step_input)
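# PolyDecayPolicy is referenced above but not defined in this snippet. Below is
# a minimal sketch of what a polynomial learning-rate decay policy with the
# attributes assigned above (base_lr, start_epoch, n_iters_per_epoch) might
# look like; the `power` and `max_epochs` parameters and the `learning_rate`
# method name are illustrative assumptions, not the original implementation.
class PolyDecayPolicy(object):
    def __init__(self, base_lr, power=0.9, max_epochs=100):
        self.base_lr = base_lr
        self.power = power
        self.max_epochs = max_epochs
        self.start_epoch = 1
        self.n_iters_per_epoch = 100

    def learning_rate(self, epoch, iteration=0):
        # Overall progress in [0, 1], measured in iterations since start_epoch.
        current = (epoch - self.start_epoch) * self.n_iters_per_epoch + iteration
        total = max(self.max_epochs * self.n_iters_per_epoch, 1)
        progress = min(max(float(current) / total, 0.0), 1.0)
        # Polynomial decay: lr = base_lr * (1 - progress) ** power.
        return self.base_lr * (1.0 - progress) ** self.power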
def map_index():
    city_name = g.city_name
    today = time.strftime("%Y-%m-%d", time.localtime())
    date_end = request.args.get('date_end', default=today)
    hd_type = request.args.get('type', default='aqi')
    condition = {
        'date': date_end,
    }
    hd_types = ['aqi', 'quality', 'pm25', 'pm10', 'so2', 'co', 'no2', 'o3']
    if hd_type not in hd_types:
        hd_type = 'aqi'
    field = 'hd_' + hd_type
    history_client = History.factory()
    city_all = history_client.all_city(condition)
    data = dict()
    data['current_page'] = 'map'
    data['city_name'] = city_name
    data['city_all'] = city_all
    data['req_args'] = dict(request.args.items())
    data['date_end'] = date_end
    data['type'] = hd_type
    data['field'] = field
    return render_template('map/index.html', **data)
def report_aqi_trend():
    city_name = g.city_name
    today = time.strftime("%Y-%m-%d", time.localtime())
    day7_dt = datetime.datetime.now() - datetime.timedelta(days=7)
    day7 = day7_dt.strftime("%Y-%m-%d")
    date_start = request.args.get('date_start', default=day7)
    date_end = request.args.get('date_end', default=today)
    condition = {
        'city_name': city_name,
        'date_start': date_start,
        'date_end': date_end
    }
    history_client = History.factory()
    history_all = history_client.all_day(condition)
    # Quality levels: Excellent, Good, Lightly Polluted, Moderately Polluted,
    # Heavily Polluted, Severely Polluted.
    history_types = [u'优', u'良', u'轻度污染', u'中度污染', u'重度污染', u'严重污染']
    for history in history_all:
        history['hd_quality_index'] = history_types.index(
            history['hd_quality'])
    data = dict()
    data['current_page'] = 'report'
    data['req_args'] = dict(request.args.items())
    data['history_all'] = history_all
    data['city_name'] = city_name
    data['date_start'] = date_start
    data['date_end'] = date_end
    return render_template('report/aqi_trend.html', **data)
def report_aqi_total():
    city_name = g.city_name
    today = time.strftime("%Y-%m-%d", time.localtime())
    day7_dt = datetime.datetime.now() - datetime.timedelta(days=7)
    day7 = day7_dt.strftime("%Y-%m-%d")
    date_start = request.args.get('date_start', default=day7)
    date_end = request.args.get('date_end', default=today)
    condition = {
        'city_name': city_name,
        'date_start': date_start,
        'date_end': date_end
    }
    history_client = History.factory()
    hd_quality = history_client.total_history(condition, 'hd_quality')
    data = dict()
    data['current_page'] = 'report'
    data['req_args'] = dict(request.args.items())
    data['hd_quality'] = hd_quality
    data['city_name'] = city_name
    data['date_start'] = date_start
    data['date_end'] = date_end
    return render_template('report/aqi_total.html', **data)
def data_history():
    city_name = g.city_name
    history_client = History.factory()
    history_city = history_client.get_city_by_name(city_name)
    if not history_city:
        # "Weather data queries for this city are not supported yet."
        return u'暂时不支持此城市的天气数据查询'
    weather_client = Weather.factory()
    weather_city = weather_client.get_city_by_name(city_name)
    today = time.strftime("%Y-%m-%d", time.localtime())
    day7_dt = datetime.datetime.now() - datetime.timedelta(days=7)
    day7 = day7_dt.strftime("%Y-%m-%d")
    page = request.args.get('page', 1, type=int)
    date_start = request.args.get('date_start', default=day7)
    date_end = request.args.get('date_end', default=today)
    condition = {
        'city_name': city_name,
        'date_start': date_start,
        'date_end': date_end
    }
    history_client = History.factory()
    info = history_client.search_day(condition, page, 31)
    # print json.dumps(dict(info), indent=7, ensure_ascii=False)
    # print str(info)
    # Cap the pager at 7 pages.
    info['pages'] = min(7, info['pages'])
    data = dict()
    data['current_page'] = 'data'
    data['date_start'] = date_start
    data['date_end'] = date_end
    data['history_city'] = history_city
    data['weather_city'] = weather_city
    data['req_args'] = dict(request.args.items())
    data['info'] = info
    data['page'] = page
    return render_template('data/history.html', **data)
def data_index():
    city_name = g.city_name
    history_client = History.factory()
    history_city = history_client.get_city_by_name(city_name)
    weather_client = Weather.factory()
    weather_city = weather_client.get_city_by_name(city_name)
    crawl_client = Crawl.factory()
    if request.method == 'POST':
        job_id = request.form.get('job_id', default=0)
        if not job_id:
            # "Missing job id!"
            return json.dumps({'status': False, 'message': u'没有任务id!'})
        job_info = crawl_client.get_job_info_by_id(job_id)
        if not job_info:
            # "No job information found!"
            return json.dumps({'status': False, 'message': u'没有任务信息!'})
        if 'city_name' not in job_info or job_info['city_name'] != city_name:
            # "No job information for this city!"
            return json.dumps({'status': False, 'message': u'此城市没有任务信息!'})
        # Equivalent CLI: scrapy crawl aqistudy -a city_name=上海 -a month=2017-01
        sp = Spider.factory()
        m = job_info['job_month']
        if int(m) < 10:
            m = '0' + str(m)
        month = str(job_info['job_year']) + '-' + str(m)
        result = sp.schedule_job(spider=job_info['job_spider'], setting=[],
                                 jobid=job_id, city_name=city_name,
                                 month=month)
        if not result:
            # "Failed to start the job!"
            return json.dumps({'status': False, 'message': u'启动任务失败!'})
        # "Job started successfully!"
        return json.dumps({'status': True, 'message': u'启动任务成功!'})
    current = datetime.datetime.now()
    job_list = crawl_client.get_job_list(city_name, current.year,
                                         current.month)
    # print job_list
    data = dict()
    data['current_page'] = 'data'
    data['history_city'] = history_city
    data['weather_city'] = weather_city
    data['req_args'] = dict(request.args.items())
    data['job_list'] = job_list
    return render_template('data/index.html', **data)
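# Spider.schedule_job is not shown in this snippet. A minimal sketch, assuming
# it forwards to a scrapyd instance via its schedule.json endpoint; the scrapyd
# URL, the 'aqi' project name, and the function shape below are illustrative
# assumptions, not the project's actual implementation.
import requests

def schedule_job(spider, setting, jobid, **spider_args):
    payload = {
        'project': 'aqi',    # assumed scrapyd project name
        'spider': spider,
        'jobid': jobid,
        'setting': setting,  # e.g. ['DOWNLOAD_DELAY=2']; may be empty
    }
    payload.update(spider_args)  # extra -a arguments, e.g. city_name, month
    resp = requests.post('http://localhost:6800/schedule.json', data=payload)
    return resp.json().get('status') == 'ok'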
def learn_index():
    city_name = g.city_name
    today = time.strftime("%Y-%m-%d", time.localtime())
    day7_dt = datetime.datetime.now() - datetime.timedelta(days=7)
    day7 = day7_dt.strftime("%Y-%m-%d")
    date_start = request.args.get('date_start', default=day7)
    date_end = request.args.get('date_end', default=today)
    history = request.args.get('history', default=1, type=int)
    weather = request.args.get('weather', default=1, type=int)
    condition = {
        'city_name': city_name,
        'date_start': date_start,
        'date_end': date_end
    }
    history_client = History.factory()
    weather_client = Weather.factory()
    history_count = 0
    weather_count = 0
    if history == 1:
        history_count = history_client.count_history(condition)
    if weather == 1:
        weather_count = weather_client.count_weather(condition)
    data = dict()
    data['current_page'] = 'learn'
    data['req_args'] = dict(request.args.items())
    data['city_name'] = city_name
    data['date_start'] = date_start
    data['date_end'] = date_end
    data['history'] = history
    data['weather'] = weather
    data['history_count'] = history_count
    data['weather_count'] = weather_count
    return render_template('learn/index.html', **data)
class Player(Base):
    def __init__(self, cfg, environment, sess, model_dir):
        super(Player, self).__init__(cfg)
        self.sess = sess
        self.inputs = tf.placeholder('float32', [
            None, self.cfg.screen_height, self.cfg.screen_width,
            self.cfg.history_length], name='inputs')
        self.target_inputs = tf.placeholder('float32', [
            None, self.cfg.screen_height, self.cfg.screen_width,
            self.cfg.history_length], name='target_inputs')
        self.target_q_t = tf.placeholder('float32', [None], name='target_q_t')
        self.action = tf.placeholder('int64', [None], name='action')
        self.env = environment
        self.history = History(self.cfg)
        self.model_dir = model_dir
        self.memory = ExperienceBuffer(cfg, self.model_dir)
        self.learning_rate_minimum = 0.0001
        self.double_q = True

    def play(self, load_model=True, test_ep=None, num_step=100000,
             num_episodes=200, display=True):
        # Build the online and target Q-networks.
        model_q = Model()
        model_target_q = Model(is_target_q=True)
        end_points_q = model_q.model_def(self.inputs, self.env, name='main_q')
        _ = model_target_q.model_def(
            self.target_inputs, self.env, name='target_q')
        init = tf.global_variables_initializer()
        self.saver = tf.train.Saver(max_to_keep=None)
        if load_model:
            utils.load_model(self.saver, self.sess, self.model_dir)
        else:
            self.sess.run(init)
        if test_ep is None:
            test_ep = self.cfg.ep_test
        if not display:
            gym_dir = '/tmp/%s-%s' % (self.cfg.env_name, utils.get_time())
            self.env.env.monitor.start(gym_dir)
        best_reward, best_episode = 0, 0
        for episode in xrange(num_episodes):
            screen, reward, action, terminal = self.env.new_random_game()
            current_reward = 0
            # Prime the frame history with the initial screen.
            for _ in xrange(self.cfg.history_length):
                self.history.add(screen)
            for t in tqdm(xrange(num_step), ncols=70):
                # 1. predict
                action = self.predict(
                    end_points_q['pred_action'], self.history.get(),
                    ep=test_ep)
                # 2. act
                screen, reward, terminal = self.env.act(
                    action, is_training=False)
                # 3. observe
                self.history.add(screen)
                current_reward += reward
                if terminal:
                    break
            if current_reward > best_reward:
                best_reward = current_reward
                best_episode = episode
            print " [%d] Best reward : %d" % (best_episode, best_reward)
        if not display:
            self.env.env.monitor.close()
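# Player.predict() is called in play() above but not shown here. A minimal
# sketch of the usual epsilon-greedy action selection it implies, assuming
# `pred_action` is the argmax-Q op returned by Model.model_def, `ep` is the
# exploration probability, the environment exposes `action_size`, and `random`
# is imported at module level; this body is an assumption, not the original.
def predict(self, pred_action, s_t, ep):
    if random.random() < ep:
        # Explore: choose a uniformly random action.
        action = random.randrange(self.env.action_size)
    else:
        # Exploit: take the greedy action from the online Q-network.
        action = self.sess.run(pred_action, {self.inputs: [s_t]})[0]
    return action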
def main():
    # Init Colorama
    init(strip=not sys.stdout.isatty())

    # Constants
    root_dir = os.path.dirname(os.path.abspath(__file__))
    title = "Chika's Script\nv0.1"

    # Set routines
    add_anime = AddAnime()
    download = Download()
    history = History()

    # Grab ASCII Art to display
    chika = MenuArt().chika

    # Get username
    try:
        with open(os.path.join(root_dir, 'data\\settings.json'), 'r',
                  encoding='utf-8') as settings:
            data = json.load(settings)
            username = '******' + data["username"] if data["username"] != '' else ''
    except Exception as e:
        username = ''
        print("Error parsing JSON settings file: ", e)

    exit_script = False
    while not exit_script:
        # Clear the screen
        sp.call('cls', shell=True)

        # Show main menu
        print(title)
        print(colored(chika, "cyan"))
        print('\t\t\t\tOptions:', end="")
        print(colored(' 1) Download ', 'magenta'), end="")
        print('⡇', end="")
        print(colored(' 2) Add new anime ', 'green'), end="")
        print('⡇', end="")
        print(colored(' 3) History ', 'yellow'), end="")
        print('⡇', end="")
        print(colored(' 0) Exit ', 'red'))

        # Run routines
        try:
            opt = int(input(f'Welcome{username}!\nPlease, select an option: '))
            if opt == 1:
                download.start()
                download.join()
            elif opt == 2:
                add_anime.start(root_dir)
            elif opt == 3:
                history.display_history(root_dir)
            elif opt == 0:
                print('See ya ;)')
                exit_script = True
            else:
                print('This is not a valid option!')
        except Exception as e:
            print('Error: ', e)
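# The settings file read above is expected to contain at least a "username"
# key; a hypothetical data\settings.json (the value is illustrative only):
#
# {
#     "username": "yourname"
# }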
def run(self):
    # User input
    to_download = input("Anime/episode -> ")

    # User input properly divided
    # TODO: move this into a function with support for multiple episodes
    # and multiple anime
    anime = to_download.split()[0]
    episode = to_download.split()[1]

    # Setting the file name
    self.file_name = anime + '_ep' + episode + '.mp4'

    # Starting loading thread
    loading_thread = Loading()
    loading_thread.start()

    try:
        # GET request
        self.driver.get(self.set_url_episode(anime, episode))

        # Finding and switching context to the iframe holding the video
        frame = self.driver.find_element_by_css_selector(
            'div.pframe iframe')
        self.driver.switch_to.frame(frame)

        # Getting video URL
        video = self.driver.find_element_by_tag_name(
            'video').get_attribute('src')

        # Close selenium
        self.driver.close()

        # Finish loading thread
        LoaderCondition.loaded = True

        # Set complete path of file
        complete_path = os.path.join(self.path_to_save, self.file_name)

        # Setting youtube-dl options
        ydl_opts = {
            'outtmpl': complete_path,
            'quiet': True,
            'logger': MyLogger(),
            'progress_hooks': [DownloadInfo.download_progress_bar]
        }

        # Get start time
        start = time()
        today = datetime.now()
        hour = str(today.hour)
        minute = str(today.minute) if len(str(
            today.minute)) > 1 else '0' + str(today.minute)

        # Performing the download
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            # TODO: add the possibility to download more than one episode
            print(
                f'Downloading {anime} episode {episode} to {self.path_to_save}'
            )
            ydl.download([video])

        # Prepare to add to history
        end = time()
        total_time = DownloadInfo.calc_time(start, end)
        size = str(int(os.path.getsize(complete_path) / 1000000))
        new_item = {
            DownloadInfo.current_day(): {
                "anime": anime,
                "episode": episode,
                "start-hour": f"{hour}:{minute}",
                "file-size": f"~{size}Mb",
                "took:": total_time
            }
        }

        # Add the current download to history
        History.add_to_history(new_item, self.root_dir)
        return
    except Exception as e:
        print('\nError: ', e)
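# MyLogger and DownloadInfo.download_progress_bar are used in ydl_opts above
# but not defined in this snippet. A minimal sketch of the interfaces
# youtube_dl expects (a logger with debug/warning/error methods and a progress
# hook that receives a status dict); the bodies below are assumptions, and only
# the hook is sketched for DownloadInfo (calc_time/current_day are omitted).
class MyLogger(object):
    def debug(self, msg):
        pass  # Silence youtube_dl's verbose output.

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)


class DownloadInfo(object):
    @staticmethod
    def download_progress_bar(d):
        # youtube_dl calls hooks with a dict whose 'status' is 'downloading',
        # 'finished' or 'error'.
        if d['status'] == 'downloading':
            total = d.get('total_bytes') or d.get('total_bytes_estimate')
            if total:
                percent = d['downloaded_bytes'] / total * 100
                print(f"\rDownloaded {percent:.1f}%", end='')
        elif d['status'] == 'finished':
            print('\nDownload finished.')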
def main():
    setup_path()

    # Parse command line arguments
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        "--debug", help="Increase output verbosity", action="store_true")
    args_parser.add_argument("--tweak", help="Set time and date to tweaked value")
    args_parser.add_argument("--chords", help="Directory to look for scripts in")
    args_parser.add_argument(
        "--history", help="List previous chord executions")
    args = args_parser.parse_args()

    # Set up logging
    import core.log as log
    logging = log.getLogger(args.debug)
    logging.debug('Using Python %s', platform.python_version())

    # Load configuration
    from core.config import config as conf

    # Set up database
    db = setup_db()

    # Where are the chords kept? Prefer the --chords flag, then the
    # configuration, then ../chords relative to currentDirectory (assumed to
    # be defined at module level). The default is assigned first so the
    # variable is always bound.
    chordsDirectory = os.path.abspath(
        os.path.join(currentDirectory, '../chords'))
    if args.chords and os.path.isdir(args.chords):
        chordsDirectory = args.chords
    else:
        try:
            chordsDirectoryFromConf = conf.get('ocarina', 'chords')
            if os.path.isdir(chordsDirectoryFromConf):
                chordsDirectory = chordsDirectoryFromConf
        except Exception:
            pass
    sys.path.insert(0, chordsDirectory)

    # Where are the virtual environments kept?
    virtualEnvDirectory = os.path.abspath(
        os.path.join(currentDirectory, '../.virtualenv'))
    try:
        virtualEnvDirectoryFromConf = conf.get('ocarina', 'virtualenv')
        if os.path.isdir(virtualEnvDirectoryFromConf):
            virtualEnvDirectory = virtualEnvDirectoryFromConf
    except Exception:
        pass

    shouldRunChords = not args.history
    if shouldRunChords:
        import core.chords as chords
        from core.now import Now
        now = Now(args.tweak)
        chords.run(chordsDirectory, virtualEnvDirectory, now, logging)

    if args.history:
        from core.history import History
        history = History(db)
        history.listPrevious(args.history)
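# core.config is not shown here. A minimal sketch, assuming it wraps an
# INI-style file read with configparser and exposes an [ocarina] section;
# the file name 'ocarina.cfg' and the example fallback paths are illustrative
# assumptions.
import configparser

config = configparser.ConfigParser()
config.read('ocarina.cfg')
chords_dir = config.get('ocarina', 'chords', fallback='../chords')
venv_dir = config.get('ocarina', 'virtualenv', fallback='../.virtualenv')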
def learn_step2():
    city_name = g.city_name
    today = time.strftime("%Y-%m-%d", time.localtime())
    day7_dt = datetime.datetime.now() - datetime.timedelta(days=7)
    day7 = day7_dt.strftime("%Y-%m-%d")
    date_start = request.args.get('date_start', default=day7)
    date_end = request.args.get('date_end', default=today)
    history = request.args.get('history', default=1, type=int)
    weather = request.args.get('weather', default=1, type=int)
    day_num = request.args.get('day_num', default=1, type=int)
    condition = {
        'city_name': city_name,
        'date_start': date_start,
        'date_end': date_end
    }
    history_client = History.factory()
    weather_client = Weather.factory()
    history_count = 0
    weather_count = 0
    if history == 1:
        history_count = history_client.count_history(condition)
    if weather == 1:
        weather_count = weather_client.count_weather(condition)
    if request.method == 'POST':
        histories = request.form.getlist('histories[]')
        weathers = request.form.getlist('weathers[]')
        # print histories, weathers
        if not histories and not weathers:
            # "Please select at least one feature data type!"
            return json.dumps({'status': False, 'message': u'请您勾选特征数据类型!'})
        if history_count < 1 and weather_count < 1:
            # "No data in the selected date range; go back and choose again."
            return json.dumps({
                'status': False,
                'message': u'选定的日期内没有数据,请返回上一步重新选择!'
            })
        if (history_count + weather_count) < 7:
            # "At least 7 records (days) are required for machine learning."
            return json.dumps({
                'status': False,
                'message': u'数据至少应该有7条(天)以上才能进行机器学习!'
            })
        learn_client = Learn.factory()
        job_id = learn_client.create_job({
            'learn_status': Learn.JOB_READY,
            'city_name': city_name,
            'date_start': date_start,
            'date_end': date_end,
            'history': history,
            'weather': weather,
            'histories': json.dumps(histories),
            'weathers': json.dumps(weathers),
            'target': 'PM25',
            'day_num': day_num
        })
        if not job_id:
            # "Failed to create the machine learning job!"
            return json.dumps({'status': False, 'message': u'建立机器学习任务失败!'})
        return json.dumps({'status': True, 'message': job_id})
    data = dict()
    data['current_page'] = 'learn'
    data['req_args'] = dict(request.args.items())
    data['city_name'] = city_name
    data['date_start'] = date_start
    data['date_end'] = date_end
    data['history'] = history
    data['weather'] = weather
    data['history_count'] = history_count
    data['weather_count'] = weather_count
    return render_template('learn/step2.html', **data)