def main(config):
    prepare_dirs_and_logger(config)
    tf.compat.v1.set_random_seed(config.random_seed)

    if 'nn' in config.arch:
        from data_nn import BatchManager
    else:
        from data import BatchManager
    batch_manager = BatchManager(config)

    if config.is_3d:
        trainer = Trainer3(config, batch_manager)
    else:
        trainer = Trainer(config, batch_manager)

    print("---------------------------------")
    print("|                               |")
    print("|                               |")
    print("|        prepare trainer        |")
    print("|            is done            |")
    print("|                               |")
    print("|                               |")
    print("---------------------------------")

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model")
        trainer.test()

def test_create_client_with_alias(self):
    with temppath() as tpath:
        config = Config(path=tpath)
        section = 'dev.alias'
        config.add_section(section)
        config.set(section, 'url', 'http://host:port')
        save_config(config)
        Config(path=tpath).get_client('dev')

def test_disable_file_logging(self):
    with temppath() as tpath:
        config = Config(tpath)
        config.add_section('cmd.command')
        config.set('cmd.command', 'log.disable', 'true')
        save_config(config)
        config = Config(tpath)
        handler = config.get_log_handler('cmd')
        ok_(not isinstance(handler, TimedRotatingFileHandler))

def drive_changelog(yesterday, html):
    """Append an HTML table of Google Drive changes since `yesterday`."""
    drive = util.get_driveclient()
    start_change_id = util.CONFIG.get("changestamp", "1")

    html += """<p><table border="1" cellpadding="3" cellspacing="0">
<thead>
<tr><th>Changestamp</th><th>Time</th><th>Author</th><th>Resource</th></tr>
</thead>
<tbody>"""

    largestChangeId = -1
    hits = 0
    page_token = None
    while True:
        param = {}
        if start_change_id:
            param['startChangeId'] = start_change_id
        if page_token:
            param['pageToken'] = page_token
        print(("Requesting start_change_id: %s "
               "largestChangeId: %s page_token: %s"
               ) % (start_change_id, largestChangeId, page_token))
        response = drive.changes().list(**param).execute()
        largestChangeId = response['largestChangeId']
        page_token = response.get('nextPageToken')
        for item in response['items']:
            changestamp = item['id']
            if item['deleted']:
                continue
            modifiedDate = datetime.datetime.strptime(
                item['file']['modifiedDate'][:19], '%Y-%m-%dT%H:%M:%S')
            modifiedDate = modifiedDate.replace(tzinfo=pytz.timezone("UTC"))
            if modifiedDate < yesterday:
                continue
            uri = item['file']['alternateLink']
            title = item['file']['title']
            author = item['file']['lastModifyingUserName']
            localts = modifiedDate.astimezone(pytz.timezone("America/Chicago"))
            hits += 1
            html += """
<tr><td>%s</td><td>%s</td><td>%s</td><td><a href="%s">%s</a></td></tr>
""" % (changestamp, localts.strftime("%-d %b %I:%M %P"), author, uri, title)
        if not page_token:
            break

    util.CONFIG['changestamp'] = changestamp
    if hits == 0:
        html += """<tr><td colspan="4">No Changes Found...</td></tr>\n"""
    html += """</tbody></table>"""
    util.save_config()
    return html

def config(access_key_id, secret_access_key):
    """Configure the API credentials.

    ACCESS_KEY_ID      the issued API key ID
    SECRET_ACCESS_KEY  the issued API secret key
    """
    save_config(access_key_id=access_key_id,
                secret_access_key=secret_access_key)
    click.echo("API key ID configured successfully")

def test_create_client_with_alias_and_timeout(self):
    with temppath() as tpath:
        config = Config(path=tpath)
        section = 'dev.alias'
        config.add_section(section)
        config.set(section, 'url', 'http://host:port')
        config.set(section, 'timeout', '1')
        save_config(config)
        eq_(Config(path=tpath).get_client('dev')._timeout, 1)
        config.set(section, 'timeout', '1,2')
        save_config(config)
        eq_(Config(path=tpath).get_client('dev')._timeout, (1, 2))

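The three client-configuration tests above all round-trip a `Config` object through `save_config` and then re-read it from the same temporary path. For reference, a minimal sketch of what such a helper might look like, assuming `Config` behaves like a `configparser.RawConfigParser` and remembers its file location on a `path` attribute (both assumptions, not shown in the snippets themselves):

import configparser


def save_config(config: configparser.RawConfigParser) -> None:
    # Hypothetical helper: write the configparser-backed Config object back
    # to the path it was created with, so a fresh Config(path=...) in the
    # same test can re-read the sections that were just added.
    with open(config.path, 'w') as writer:
        config.write(writer)
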
def convert_legacy_config(trial_dir, t_agent):
    legacy_config_file = os.path.join(trial_dir, "config.pk")
    config_file = os.path.join(trial_dir, "config.yaml")
    if not os.path.exists(legacy_config_file):
        raise ValueError("No config file found in {}".format(trial_dir))
    else:
        legacy_config = util.load_legacy_config(legacy_config_file)
        default_config = util.load_config("default.yaml")[t_agent]
        for k in default_config:
            if k in legacy_config:
                default_config[k] = legacy_config[k]
        default_config["agent"] = t_agent
        util.save_config(config_file, default_config)

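`convert_legacy_config` (like the RL training example later in this list) calls `util.save_config` with the destination path first and the config dict second. A minimal sketch of a helper with that call shape, assuming the format is YAML as the `.yaml` extension suggests (the project's actual implementation is not shown here):

import yaml  # assumes PyYAML is available


def save_config(config_file, config):
    # Hypothetical stand-in for util.save_config(config_file, default_config):
    # dump the config dict to the given path as YAML.
    with open(config_file, 'w') as handle:
        yaml.safe_dump(config, handle, default_flow_style=False)
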
def settings():
    tables = models.components.keys()
    form = forms.create_prefs_form()
    if form.validate_on_submit():
        form.populate_obj(util.AttributeWrapper(app.config))
        util.save_config(app.config, CONFIG_PATH)
        warning = library.check()
        if warning:
            flash(warning, "error")
        flash("Your settings have been saved.", "success")
        models.create()
        return redirect(request.referrer)
    return render_template('settings.html', form=form, tables=tables)

def event_reconfig():
    log.debug(f"Handling 'request={data['request']}'.")
    lkp.clear_template_info_cache()
    if lkp.instance_role == "controller":
        # Inactivate all partitions to prevent further scheduling
        partitions = get_partitions()
        update_partitions(partitions, "INACTIVE")

        # Fetch and write new config.yaml
        util.cfg = util.config_from_metadata()
        if not util.cfg.pubsub_topic_id:
            log.info("Auto reconfigure is disabled. Aborting...")
            exit(0)
        util.save_config(util.cfg, util.CONFIG_FILE)
        util.lkp = util.Lookup(util.cfg)

        # Regenerate *.conf files
        log.info("Clean install custom scripts")
        setup.install_custom_scripts(clean=True)
        log.info("Generating new cloud.conf for slurm.conf")
        setup.gen_cloud_conf(util.lkp)
        log.info("Generating new slurm.conf")
        setup.install_slurm_conf(util.lkp)
        log.info("Generating new slurmdbd.conf")
        setup.install_slurmdbd_conf(util.lkp)
        log.info("Generating new cloud_gres.conf")
        setup.gen_cloud_gres_conf(util.lkp)
        log.info("Generating new cgroup.conf")
        setup.install_cgroup_conf()

        # Send restart message to cluster topic
        message_json = json.dumps({
            "request": "restart",
            "timestamp": datetime.utcnow().isoformat(),
        })
        publish_message(project, util.cfg.pubsub_topic_id, message_json)
    elif lkp.instance_role == "compute":
        log.info(f"NO-OP for 'Request={data['request']}'.")
    elif lkp.instance_role == "login":
        log.info(f"NO-OP for 'Request={data['request']}'.")
    else:
        log.error(f"Unknown node role: {lkp.instance_role}")

def main(config):
    prepare_dirs_and_logger(config)
    tf.set_random_seed(config.random_seed)

    from data import BatchManager
    batch_manager = BatchManager(config)

    trainer = Trainer_tumor(config, batch_manager)

    if config.is_train:
        save_config(config)
        trainer.train()
    else:
        if not config.load_path:
            raise Exception(
                "[!] You should specify `load_path` to load a pretrained model")
        trainer.test()

def setup_model(self):
    """Creates a SFUN object.

    Returns:
        keras model
    """
    self.checkpoints_path = os.path.join(
        self._config['training']['session_dir'], 'checkpoints')
    if not os.path.exists(self.checkpoints_path):
        os.mkdir(self.checkpoints_path)

    self.history_filename = ('history_' +
        self._config['training']['session_dir'][
            self._config['training']['session_dir'].rindex('/') + 1:] + '.csv')

    self.model, inputs_mask = self.build_model(train_bn=self.train_bn)
    self.compile_sfun(self.model, inputs_mask, self._config['training']['lr'])

    self._config['dataset']['num_freq'] = self.num_freq
    config_path = os.path.join(
        self._config['training']['session_dir'], 'config.json')

    if os.path.exists(self.checkpoints_path) and util.dir_contains_files(self.checkpoints_path):
        checkpoints = os.listdir(self.checkpoints_path)
        checkpoints.sort(key=lambda x: os.stat(
            os.path.join(self.checkpoints_path, x)).st_mtime)
        last_checkpoint = checkpoints[-1]
        last_checkpoint_path = os.path.join(self.checkpoints_path, last_checkpoint)
        self.epoch_num = int(last_checkpoint[11:16])
        print('Loading Sound Field Network model from epoch: %d' % self.epoch_num)
        self.model.load_weights(last_checkpoint_path)
    else:
        print('Building new Sound Field Network model...')
        self.epoch_num = 0

    self.model.summary()

    if not os.path.exists(config_path):
        util.save_config(config_path, self._config)

    return self.model

import os, datetime

from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy

from session import SqliteSessionInterface
import util

CONFIG_FILE = 'altium.cfg'

app = Flask(__name__)
CONFIG_PATH = os.path.join(app.root_path, CONFIG_FILE)
app.config.from_object('altium.config')
app.config.from_pyfile(CONFIG_PATH, silent=True)
util.save_config(app.config, CONFIG_PATH)

# Server-side sessions
path = app.config['SESSION_PATH']
path = os.path.join(app.root_path, '.sessions')
if not os.path.exists(path):
    os.mkdir(path)
    os.chmod(path, int('700', 8))
app.session_interface = SqliteSessionInterface(path)

# Initial check of the library to establish SVN data
library = util.SVNLibrary()
#library.check()

db = SQLAlchemy(app)

import hooks

def run_training():
    '''train the Neural Network'''
    # sanity check
    assert (FLAGS.input_data_type == 'float' or FLAGS.input_data_type == 'int')
    assert (FLAGS.output_data_type == 'float' or FLAGS.output_data_type == 'int')

    # import the dataset
    data_sets = dataset.Datasets(FLAGS.data_dir,
                                 FLAGS.separate_file,
                                 FLAGS.input_data_type,
                                 FLAGS.output_data_type)
    # for hotspot training
    '''
    data_sets = dataset.Datasets(FLAGS.data_dir,
                                 FLAGS.separate_file,
                                 FLAGS.input_data_type,
                                 FLAGS.output_data_type,
                                 FLAGS.tile_size,
                                 FLAGS.num_maps)
    '''

    with tf.Graph().as_default():
        # placeholder
        input_pl, golden_pl = util.generate_placeholder(
            data_sets.num_in_neuron,
            data_sets.num_out_neuron,
            FLAGS.batch_size,
            FLAGS.input_data_type,
            FLAGS.output_data_type)

        # build graph
        if FLAGS.hidden1 == 0:
            assert (FLAGS.hidden2 == 0)
            outputs = util.layer('output_layer', input_pl,
                                 data_sets.num_in_neuron,
                                 data_sets.num_out_neuron, None)
        else:
            hidden1 = util.layer('hidden1', input_pl,
                                 data_sets.num_in_neuron,
                                 FLAGS.hidden1, util.fast_sigmoid)
            if FLAGS.hidden2 == 0:
                outputs = util.layer('output_layer', hidden1,
                                     FLAGS.hidden1,
                                     data_sets.num_out_neuron, None)
            else:
                hidden2 = util.layer('hidden2', hidden1,
                                     FLAGS.hidden1,
                                     FLAGS.hidden2, util.fast_sigmoid)
                outputs = util.layer('output_layer', hidden2,
                                     FLAGS.hidden2,
                                     data_sets.num_out_neuron, None)

        # loss
        #loss = bm.loss(outputs, golden_pl)
        loss = util.loss(outputs, golden_pl, FLAGS.benchmark)

        # train
        #train_op = bm.training(loss, FLAGS.learning_rate)
        train_op = util.training(loss, FLAGS.learning_rate)

        # accumulated error for one batch of data
        error = util.error(outputs, golden_pl, FLAGS.benchmark)

        # summary - not necessary
        summary = tf.merge_all_summaries()

        # init
        init = tf.initialize_all_variables()

        # sess
        sess = tf.Session()

        # summary writer - not necessary
        summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)

        # everything built, run init
        sess.run(init)

        # start training
        #_, max_steps = data_sets.train.max_steps(FLAGS.batch_size)
        for step in xrange(FLAGS.max_steps):
            feed_dict = util.fill_feed_dict(data_sets.train,
                                            input_pl, golden_pl,
                                            FLAGS.batch_size)
            sess.run(train_op, feed_dict=feed_dict)

            # print the loss every 100 steps
            # write the summary
            # evaluate the model
            if not step % 100:
                print('step %d: loss = %.2f' %
                      (step, sess.run(loss, feed_dict=feed_dict)))

                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

                '''
                print('training data evaluation')
                util.do_eval(sess, error, input_pl, golden_pl,
                             FLAGS.batch_size, data_sets.train)
                '''
                print('validation data evaluation')
                util.do_eval(sess, error, input_pl, golden_pl,
                             FLAGS.batch_size, data_sets.validate)

        # final accuracy
        print('test data evaluation')
        util.do_eval(sess, error, input_pl, golden_pl,
                     FLAGS.batch_size, data_sets.test)

        # filename for saving
        savefile = str(data_sets.num_in_neuron) + "_" + str(
            FLAGS.hidden1) + "_" + str(FLAGS.hidden2) + "_" + str(
            data_sets.num_out_neuron) + ".txt"

        # save weights and biases
        util.save_config(sess, NUM_LAYERS, FLAGS.config_dir, savefile)

        # save trained output
        #util.save_output(sess, data_sets.train, outputs, FLAGS.data_dir)
        # need to fetch original input data
        output_save = sess.run(outputs,
                               feed_dict={input_pl: data_sets.input_data})
        np.savetxt(FLAGS.data_dir + "train_result/" + savefile,
                   output_save, delimiter=" ")

        for index, playlist in enumerate(playlists):
            print(str(index) + " | " + playlist)
    elif (arg == 'remote'):
        playlists = user.get_playlist_names()
        for index, playlist in enumerate(playlists):
            print(str(index) + " | " + playlist)
    else:
        print('Not yet implemented.')
        print('Show all playlists, local and remote')
elif (cmd == 'select' and arg != None):
    ids = user.get_playlist_id(arg)
    config["current_playlist"]["uid"] = ids["uid"]
    config["current_playlist"]["pid"] = ids["pid"]
    config["current_playlist"]["name"] = user.get_playlist_name(arg)
    util.save_config(config)
    print('Set current playlist to ' + config["current_playlist"]["name"])
elif (cmd == 'clone' and arg != None):
    ids = user.get_playlist_id(arg)
    config["current_playlist"]["uid"] = ids["uid"]
    config["current_playlist"]["pid"] = ids["pid"]
    config["current_playlist"]["name"] = user.get_playlist_name(arg)
    util.save_config(config)
    try:
        user.init_git_playlist(ids["uid"], ids["pid"])
        print('Cloned playlist ' + config["current_playlist"]["name"])
    except:
        print(config["current_playlist"]["name"] + ' already cloned.')

def init(self, settings, settingsFile):
    for k in self._baseTypes.keys():
        self._codeTables[k] = self._baseTypes[k].init(settings)
    util.save_config(settings, settingsFile)

def run(self):
    done = False
    while not done:
        self.screen.blit(Options.sky, self.screen.get_rect())
        self.water.update()
        self.water_sprite.draw(self.screen)

        for i in xrange(len(self.menu)):
            self.render(i)

        cloud.update()
        cloud.draw(self.screen)

        rect = Options.logo.get_rect()
        rect.centerx = self.screen.get_rect().centerx
        rect.top = 0
        self.screen.blit(Options.logo, rect)

        image = util.smallfont.render("http://funnyboat.sourceforge.net/",
                                      True, (0, 0, 0))
        rect = image.get_rect()
        rect.midbottom = self.screen.get_rect().midbottom
        self.screen.blit(image, rect)

        pygame.display.flip()
        self.t += 1

        nextframe = False
        while not nextframe:
            pygame.event.post(pygame.event.wait())
            for event in pygame.event.get():
                if event.type == QUIT or \
                   event.type == KEYDOWN and event.key == K_ESCAPE:
                    self.selection = -1
                    done = True
                    nextframe = True
                elif event.type == NEXTFRAME:
                    nextframe = True
                elif event.type == JOYAXISMOTION:
                    if event.axis == 1:
                        if event.value < -0.5:
                            self.move_up()
                        if event.value > 0.5:
                            self.move_down()
                    elif event.axis == 0:
                        if event.value < -0.5:
                            self.change_left()
                        if event.value > 0.5:
                            self.change_right()
                elif event.type == JOYBUTTONDOWN:
                    if self.gamepad:
                        if self.gamepad.is_pressed('a', event):
                            self.change_right()
                        elif self.gamepad.is_pressed('b', event):
                            done = True
                    elif event.button == 0:
                        #done = True
                        self.change_right()
                    elif event.button == 1:
                        done = True
                elif event.type == KEYDOWN:
                    if event.key == K_UP:
                        self.move_up()
                    elif event.key == K_DOWN:
                        self.move_down()
                    elif event.key == K_LEFT:
                        self.change_left()
                    elif event.key == K_RIGHT:
                        self.change_right()
                    elif self.selection == Options.NAME:
                        if event.key == K_BACKSPACE:
                            if len(Variables.name) != 0:
                                Variables.name = Variables.name[:-1]
                        elif event.key == K_SPACE or \
                                event.unicode != " " and event.unicode >= u' ':
                            if len(Variables.name) < 32:
                                Variables.name += event.unicode
                        self.refresh()
                    elif event.key == K_SPACE or event.key == K_RETURN:
                        #done = True
                        self.change_right()

    util.save_config()
    return self.selection

def train(config, trial_dir=None, visualize=False, overwrite_config=False):
    t_agent = config["agent"]
    assert t_agent in SUPPORTED_AGENTS, "Agent type {} not supported".format(t_agent)

    # prepare trial environment
    pid = os.getpid()
    trial_name = "{}_pid{}".format(t_agent, pid)
    logger, log_dir = prepare_for_logging(trial_name)

    # create agent
    if "max_obstacles" not in config:
        config["max_obstacles"] = 3
    env = NIPS(visualize, max_obstacles=config["max_obstacles"])
    logger.info("pid={}, env={}".format(pid, id(env)))

    # to train from scratch or fine tune
    fine_tuning = False
    if trial_dir is not None:
        config_file = os.path.join(trial_dir, "config.yaml")
        if not os.path.exists(config_file):
            convert_legacy_config(trial_dir, t_agent)
        existing_config = util.load_config(config_file)
        fine_tuning = True
        if overwrite_config:
            logger.info("Overwrite config from file {}".format(trial_dir))
            for k, v in config.iteritems():
                existing_config[k] = v
        config = existing_config
        config["model_dir"] = trial_dir

    # save config to the trial folder
    util.print_settings(logger, config, env)
    config_file = os.path.join(log_dir, "config.yaml")
    util.save_config(config_file, config)

    # instantiate an agent
    config["logger"] = logger
    config["log_dir"] = log_dir
    if t_agent == "DDPG":
        from ddpg import DDPG
        agent = DDPG(env, config)
    elif t_agent == "TRPO":
        from trpo import TRPO
        agent = TRPO(env, config)
    else:
        # because of the assertion above, this should never happen
        raise ValueError("Unsupported agent type: {}".format(t_agent))

    # learn
    if fine_tuning:
        util.print_sec_header(logger, "Continual training")
        agent.set_state(config)
    else:
        util.print_sec_header(logger, "Training from scratch")
    reward_hist, steps_hist = agent.learn(
        total_episodes=config["total_episodes"])
    env.close()

    # send result
    img_file = os.path.join(log_dir, "train_stats.png")
    util.plot_stats(reward_hist, steps_hist, img_file)
    log_file = os.path.join(log_dir, "train.log")
    util.send_email(log_dir, [img_file], [log_file], config)
    logger.info("Finished (pid={}).".format(pid))

def drive_changelog(regime, yesterday, html):
    """Append an HTML table of Google Drive changes since `yesterday`."""
    drive = util.get_driveclient()
    start_change_id = util.CONFIG.get("changestamp_" + regime, "1")

    html += """<p><table border="1" cellpadding="3" cellspacing="0">
<thead>
<tr><th>Changestamp</th><th>Time</th><th>Author</th><th>Resource</th></tr>
</thead>
<tbody>"""

    largestChangeId = -1
    hits = 0
    page_token = None
    while True:
        param = {}
        if start_change_id:
            param['startChangeId'] = start_change_id
        if page_token:
            param['pageToken'] = page_token
        print(("Requesting start_change_id: %s "
               "largestChangeId: %s page_token: %s"
               ) % (start_change_id, largestChangeId, page_token))
        response = drive.changes().list(**param).execute()
        largestChangeId = response['largestChangeId']
        page_token = response.get('nextPageToken')
        for item in response['items']:
            changestamp = item['id']
            if item['deleted']:
                continue
            # don't do more work when this file actually did not change
            modifiedDate = datetime.datetime.strptime(
                item['file']['modifiedDate'][:19], '%Y-%m-%dT%H:%M:%S')
            modifiedDate = modifiedDate.replace(tzinfo=pytz.timezone("UTC"))
            if modifiedDate < yesterday:
                continue
            # Need to see which base folder this file is in!
            parentid = get_base_folder_id(regime, drive, item['fileId'])
            if parentid != util.CONFIG[regime]['basefolder']:
                print(('Skipping %s as it is in another project'
                       ) % (repr(item['file']['title']), ))
                continue
            uri = item['file']['alternateLink']
            title = item['file']['title'].encode('ascii', 'ignore')
            author = item['file']['lastModifyingUserName']
            localts = modifiedDate.astimezone(pytz.timezone("America/Chicago"))
            hits += 1
            html += """
<tr><td>%s</td><td>%s</td><td>%s</td><td><a href="%s">%s</a></td></tr>
""" % (changestamp, localts.strftime("%-d %b %I:%M %P"), author, uri, title)
        if not page_token:
            break

    util.CONFIG['changestamp_' + regime] = changestamp
    if hits == 0:
        html += """<tr><td colspan="4">No Changes Found...</td></tr>\n"""
    html += """</tbody></table>"""
    util.save_config()
    return html

def main(config):
    logger = util.create_logger(name='train_log', log_dir=config.log_dir)
    if not os.path.exists(config.log_dir):
        os.makedirs(config.log_dir, exist_ok=True)
    util.save_config(config.log_dir, config.config)
    logger.info('Logs and models will be saved in {}.'.format(config.log_dir))

    rnd = np.random.RandomState(seed=config.seed)
    solution = util.create_solution(device='cpu:0')
    num_params = solution.get_num_params()
    if config.load_model is not None:
        solution.load(config.load_model)
        print('Loaded model from {}'.format(config.load_model))
        init_params = solution.get_params()
    else:
        init_params = None
    solver = cma.CMAEvolutionStrategy(
        x0=np.zeros(num_params) if init_params is None else init_params,
        sigma0=config.init_sigma,
        inopts={
            'popsize': config.population_size,
            'seed': config.seed if config.seed > 0 else 42,
            'randn': np.random.randn,
        },
    )

    best_so_far = -float('Inf')
    ii32 = np.iinfo(np.int32)
    repeats = [config.reps] * config.population_size

    device_type = 'cpu' if args.num_gpus <= 0 else 'cuda'
    num_devices = mp.cpu_count() if args.num_gpus <= 0 else args.num_gpus
    with mp.get_context('spawn').Pool(
            initializer=worker_init,
            initargs=(args.config, device_type, num_devices),
            processes=config.num_workers,
    ) as pool:
        for n_iter in range(config.max_iter):
            params_set = solver.ask()
            task_seeds = [rnd.randint(0, ii32.max)] * config.population_size
            fitnesses = []
            ss = 0
            while ss < config.population_size:
                ee = ss + min(config.num_workers, config.population_size - ss)
                fitnesses.append(
                    pool.map(func=get_fitness,
                             iterable=zip(params_set[ss:ee],
                                          task_seeds[ss:ee],
                                          repeats[ss:ee])))
                ss = ee
            fitnesses = np.concatenate(fitnesses)
            if isinstance(solver, cma.CMAEvolutionStrategy):
                # CMA minimizes.
                solver.tell(params_set, -fitnesses)
            else:
                solver.tell(fitnesses)
            logger.info(
                'Iter={0}, '
                'max={1:.2f}, avg={2:.2f}, min={3:.2f}, std={4:.2f}'.format(
                    n_iter, np.max(fitnesses), np.mean(fitnesses),
                    np.min(fitnesses), np.std(fitnesses)))

            best_fitness = max(fitnesses)
            if best_fitness > best_so_far:
                best_so_far = best_fitness
                model_path = os.path.join(config.log_dir, 'best.npz')
                save_params(solver=solver, solution=solution,
                            model_path=model_path)
                logger.info('Best model updated, score={}'.format(best_fitness))

            if (n_iter + 1) % config.save_interval == 0:
                model_path = os.path.join(
                    config.log_dir, 'iter_{}.npz'.format(n_iter + 1))
                save_params(solver=solver, solution=solution,
                            model_path=model_path)

def run(self):
    done = False
    while not done:
        self.screen.blit(Options.sky, self.screen.get_rect())
        self.water.update()
        self.water_sprite.draw(self.screen)

        for i in xrange(len(self.menu)):
            self.render(i)

        cloud.update()
        cloud.draw(self.screen)

        rect = Options.logo.get_rect()
        rect.centerx = self.screen.get_rect().centerx
        rect.top = 0
        self.screen.blit(Options.logo, rect)

        image = util.smallfont.render("http://funnyboat.sourceforge.net/",
                                      True, (0, 0, 0))
        rect = image.get_rect()
        rect.midbottom = self.screen.get_rect().midbottom
        self.screen.blit(image, rect)

        pygame.display.flip()
        self.t += 1

        nextframe = False
        while not nextframe:
            pygame.event.post(pygame.event.wait())
            for event in pygame.event.get():
                if event.type == QUIT or \
                   event.type == KEYDOWN and event.key == K_ESCAPE:
                    self.selection = -1
                    done = True
                    nextframe = True
                elif event.type == NEXTFRAME:
                    nextframe = True
                elif event.type == JOYAXISMOTION:
                    if event.axis == 1:
                        if event.value < -0.5:
                            self.move_up()
                        if event.value > 0.5:
                            self.move_down()
                    elif event.axis == 0:
                        if event.value < -0.5:
                            self.change_left()
                        if event.value > 0.5:
                            self.change_right()
                elif event.type == JOYBUTTONDOWN:
                    if event.button == 0:
                        #done = True
                        self.change_right()
                    elif event.button == 1:
                        done = True
                elif event.type == KEYDOWN:
                    if event.key == K_UP:
                        self.move_up()
                    elif event.key == K_DOWN:
                        self.move_down()
                    elif event.key == K_LEFT:
                        self.change_left()
                    elif event.key == K_RIGHT:
                        self.change_right()
                    elif self.selection == Options.NAME:
                        if event.key == K_BACKSPACE:
                            if len(Variables.name) != 0:
                                Variables.name = Variables.name[:-1]
                        elif event.key == K_SPACE or \
                                event.unicode != " " and event.unicode >= u' ':
                            if len(Variables.name) < 32:
                                Variables.name += event.unicode
                        self.refresh()
                    elif event.key == K_SPACE or event.key == K_RETURN:
                        #done = True
                        self.change_right()

    util.save_config()
    return self.selection

def save(self):
    util.save_config("telegram_bot.json", self.config)

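The snippet above is the smallest call shape in this collection: a filename plus a config dict. A minimal sketch of what a `util.save_config` with that signature might look like, assuming a JSON file on disk as the `.json` extension suggests (the project's real helper is not shown here):

import json


def save_config(filename, config):
    # Hypothetical counterpart to util.save_config("telegram_bot.json", self.config):
    # write the config dict to the named file as indented JSON.
    with open(filename, 'w') as handle:
        json.dump(config, handle, indent=2, sort_keys=True)
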
def drive_changelog(regime, yesterday, html):
    """Append an HTML table of Google Drive changes since `yesterday`."""
    drive = util.get_driveclient()
    folders = util.get_folders(drive)
    start_change_id = util.CONFIG.get("changestamp_" + regime, "1")

    html += """<p><table border="1" cellpadding="3" cellspacing="0">
<thead>
<tr><th>Folder</th><th>Resource</th></tr>
</thead>
<tbody>"""

    largestChangeId = -1
    hits = 0
    page_token = None
    param = {"includeDeleted": False, "maxResults": 1000}
    while True:
        if start_change_id:
            param["startChangeId"] = start_change_id
        if page_token:
            param["pageToken"] = page_token
        print(
            ("[%s] start_change_id: %s largestChangeId: %s page_token: %s")
            % (regime, start_change_id, largestChangeId, page_token)
        )
        response = drive.changes().list(**param).execute()
        largestChangeId = response["largestChangeId"]
        page_token = response.get("nextPageToken")
        for item in response["items"]:
            changestamp = item["id"]
            if item["deleted"]:
                continue
            # don't do more work when this file actually did not change
            modifiedDate = datetime.datetime.strptime(
                item["file"]["modifiedDate"][:19], "%Y-%m-%dT%H:%M:%S")
            modifiedDate = modifiedDate.replace(tzinfo=pytz.timezone("UTC"))
            if modifiedDate < yesterday:
                continue
            # Need to see which base folder this file is in!
            isproject = False
            for parent in item["file"]["parents"]:
                if parent["id"] not in folders:
                    print(("[%s] file: %s has unknown parent: %s")
                          % (regime, item["id"], parent["id"]))
                    continue
                if folders[parent["id"]]["basefolder"] == util.CONFIG[regime]["basefolder"]:
                    isproject = True
            if not isproject:
                print(("[%s] %s skipped") % (regime, repr(item["file"]["title"])))
                continue
            uri = item["file"]["alternateLink"]
            title = item["file"]["title"].encode("ascii", "ignore")
            localts = modifiedDate.astimezone(pytz.timezone("America/Chicago"))
            hits += 1
            pfolder = item["file"]["parents"][0]["id"]
            html += """
<tr>
<td><a href="https://docs.google.com/folderview?id=%s&usp=drivesdk">%s</a></td>
<td><a href="%s">%s</a></td></tr>
""" % (pfolder, folders[pfolder]["title"], uri, title)
            hit = False
            if "version" in item["file"] and item["file"]["mimeType"] != FMIME:
                lastmsg = ""
                try:
                    revisions = drive.revisions().list(
                        fileId=item["file"]["id"]).execute()
                except:
                    print(("[%s] file %s (%s) failed revisions")
                          % (regime, title, item["file"]["mimeType"]))
                    revisions = {"items": []}
                for item2 in revisions["items"]:
                    md = datetime.datetime.strptime(
                        item2["modifiedDate"][:19], "%Y-%m-%dT%H:%M:%S")
                    md = md.replace(tzinfo=pytz.timezone("UTC"))
                    if md < yesterday:
                        continue
                    localts = md.astimezone(pytz.timezone("America/Chicago"))
                    if "lastModifyingUser" not in item2:
                        print(("[%s] file: %s has no User? %s")
                              % (regime, title, item2))
                        continue
                    luser = item2["lastModifyingUser"]
                    hit = True
                    thismsg = """
<tr><td colspan="2"><img src="%s" style="height:25px;"/> %s by %s (%s)</td></tr>
""" % (
                        (luser["picture"]["url"] if "picture" in luser else ""),
                        localts.strftime("%-d %b %-I:%M %p"),
                        luser["displayName"],
                        luser["emailAddress"],
                    )
                    if thismsg != lastmsg:
                        html += thismsg
                    lastmsg = thismsg
            # Now we check revisions
            if not hit:
                luser = item["file"]["lastModifyingUser"]
                html += """
<tr><td colspan="2"><img src="%s" style="height:25px;"/> %s by %s (%s)</td></tr>
""" % (
                    luser["picture"]["url"] if "picture" in luser else "",
                    localts.strftime("%-d %b %-I:%M %p"),
                    luser["displayName"],
                    luser.get("emailAddress", "n/a"),
                )
        if not page_token:
            break

    util.CONFIG["changestamp_" + regime] = changestamp
    if hits == 0:
        html += """<tr><td colspan="2">No Changes Found...</td></tr>\n"""
    html += """</tbody></table>"""
    util.save_config()
    return html

def train(config_path):
    """ Trains a model for a maximum of config.max_epochs epochs

    Args:
        config_path: string, path to a config.json file
    """
    # Load configuration
    if not os.path.exists(config_path):
        print 'Error: No configuration file present at specified path.'
        return
    config = util.load_config(config_path)
    print 'Loaded configuration from: %s' % config_path

    # Create session directory
    if 'session_dir' not in config['training'] or os.path.exists(config['training']['session_dir']):
        create_new_session(config)

    # Direct all output to screen and log file
    util.set_print_to_screen_and_file(
        os.path.join(config['training']['session_dir'], 'session.log'))

    model = fcpn.FCPN(config)
    dataset = data.Dataset(config)
    dataset.prepare(config['dataset']['refresh_cache'])

    config['model']['pointnet']['num'] = np.prod(model.get_feature_volume_shape(
        config['dataset']['training_samples']['spatial_size'],
        config['model']['pointnet']['spacing'], 1))

    enqueue_op, queue_placeholders, queue_batch_placeholders, get_queue_size_op = setup_queue(
        config['dataset']['training_samples']['num_points'] + config['model']['pointnet']['num'],
        dataset.get_num_output_voxels(),
        config['training']['batch_size'])

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = config['training']['gpu']['allow_growth']
    tf_config.allow_soft_placement = config['training']['gpu']['allow_soft_placement']
    sess = tf.Session(config=tf_config)
    with sess.as_default():
        with tf.device('/gpu:' + str(config['training']['gpu']['id'])):
            # Batch normalization
            batch_i = tf.Variable(0, name='batch_i')
            batch_normalization_decay = util.get_batch_normalization_decay(
                batch_i, config['training']['batch_size'],
                config['training']['optimizer']['batch_normalization']['initial_decay'],
                config['training']['optimizer']['batch_normalization']['decay_rate'],
                config['training']['optimizer']['batch_normalization']['decay_step'])
            tf.summary.scalar('batch_normalization_decay', batch_normalization_decay)

            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Build model
            pred_op = model.build_model(
                config['training']['batch_size'],
                config['dataset']['training_samples']['spatial_size'],
                queue_batch_placeholders['input_points_pl'],
                queue_batch_placeholders['input_features_pl'],
                is_training_pl,
                dataset.get_num_learnable_classes(),
                batch_normalization_decay)

            # Loss
            loss_op = model.get_loss(
                pred_op,
                queue_batch_placeholders['output_voxels_pl'],
                queue_batch_placeholders['output_voxel_weights_pl'])

            model.print_num_parameters()
            model.print_layer_weights()

            # Confusion matrix
            confusion_matrix_op, confusion_matrix_update_op, confusion_matrix_clear_op = \
                model.get_confusion_matrix_ops(
                    pred_op,
                    queue_batch_placeholders['output_voxels_pl'],
                    dataset.get_num_learnable_classes(),
                    dataset.get_empty_class())

            # Optimizer
            learning_rate_op = util.get_learning_rate(
                batch_i, config['training']['batch_size'],
                config['training']['optimizer']['learning_rate']['initial'],
                config['training']['optimizer']['learning_rate']['decay_rate'],
                config['training']['optimizer']['learning_rate']['decay_step'])
            tf.summary.scalar('learning_rate', learning_rate_op)
            optimizer_op = tf.train.AdamOptimizer(learning_rate_op)
            if config['training']['train_upsampling_only']:
                upsampling_weights = tf.get_collection(
                    tf.GraphKeys.TRAINABLE_VARIABLES, "upsampling")
                optimization_op = optimizer_op.minimize(
                    loss_op, var_list=upsampling_weights, global_step=batch_i)
            else:
                optimization_op = optimizer_op.minimize(loss_op, global_step=batch_i)

            # Summary and Saving
            saver = tf.train.Saver(max_to_keep=config['training']['checkpoints_to_keep'])
            merged_summary_op = tf.summary.merge_all()
            summary_writers = {
                'train': tf.summary.FileWriter(
                    os.path.join(config['training']['session_dir'], 'train'), sess.graph),
                'val': tf.summary.FileWriter(
                    os.path.join(config['training']['session_dir'], 'val'))
            }

            # Initialize variables in graph
            init_g = tf.global_variables_initializer()
            init_l = tf.local_variables_initializer()
            sess.run([init_g, init_l], {is_training_pl: True})

            # Restore model weights from disk
            if config['training']['checkpoint_path']:
                weights_to_be_restored = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

                # If finetuning on a new dataset, don't load last layer weights or confusion matrix
                if config['training']['finetune_new_classes']:
                    final_layer_weights = tf.get_collection(
                        tf.GraphKeys.TRAINABLE_VARIABLES,
                        scope="upsampling/15cm_to_5cm/final_conv")
                    confusion_variables = tf.get_collection(
                        tf.GraphKeys.TRAINABLE_VARIABLES, scope="confusion")
                    weights_to_be_restored = list(
                        set(weights_to_be_restored) - set(final_layer_weights)
                        - set(confusion_variables))

                restorer = tf.train.Saver(var_list=weights_to_be_restored)
                restorer.restore(sess, config['training']['checkpoint_path'])
                print 'Model weights restored from checkpoint file: %s' % config['training']['checkpoint_path']

            num_batches = {
                'train': dataset.get_num_batches('train', config['training']['batch_size']),
                'val': dataset.get_num_batches('val', config['training']['batch_size'])
            }
            ops = {
                'train': [loss_op, merged_summary_op, optimization_op],
                'val': [loss_op, merged_summary_op, confusion_matrix_update_op]
            }

            # Start loading samples into FIFO queue
            coord, loader_thread = start_data_loader(
                sess, enqueue_op, queue_placeholders, model, dataset, config)

            # Save configuration file (with derived parameters) to session directory
            util.save_config(
                os.path.join(config['training']['session_dir'], 'config.json'), config)

            # Start training
            sample_i = 0
            for epoch_i in range(config['training']['max_epochs']):
                print '\nEpoch: %d' % epoch_i
                for s in ['train', 'val']:
                    is_training = (s == 'train')
                    if s == 'train':
                        is_training = True
                        print 'Training set\nBatch/Total Batches | Loss | Items in Queue'
                    else:
                        print 'Validation set\nBatch/Total Batches | Loss | Items in Queue'

                    for epoch_batch_i in range(num_batches[s]):
                        loss, summary, _ = sess.run(
                            ops[s], feed_dict={is_training_pl: is_training})

                        # Log statistics
                        if epoch_batch_i % config['training']['log_every_n_batches'] == 0:
                            summary_writers[s].add_summary(summary, sample_i)
                            summary_writers[s].flush()
                            print '%i/%i | %f | %d' % (epoch_batch_i + 1, num_batches[s],
                                                       loss, get_queue_size_op.eval())

                        # Only do when in training phase
                        if s == 'train':
                            sample_i += config['training']['batch_size']

                            # Save snapshot of model
                            if epoch_batch_i % config['training']['save_every_n_batches'] == 0:
                                save_path = saver.save(sess, os.path.join(
                                    config['training']['session_dir'], "model.ckpt"),
                                    global_step=epoch_i)
                                print 'Checkpoint saved at batch %d to %s' % (
                                    epoch_batch_i, save_path)

                    # Only do at the end of the validation phase
                    if s == 'train':
                        save_path = saver.save(sess, os.path.join(
                            config['training']['session_dir'], "model.ckpt"),
                            global_step=epoch_i)
                        print 'Checkpoint saved at batch %d to %s' % (epoch_batch_i, save_path)
                    elif s == 'val':
                        confusion_matrix = confusion_matrix_op.eval()

                        # Compute and print per-class statistics
                        true_positives, false_negatives, false_positives, ious = \
                            util.compute_per_class_statistics(
                                confusion_matrix[:dataset.get_empty_class(),
                                                 :dataset.get_empty_class()])
                        util.pretty_print_confusion_matrix(
                            confusion_matrix, dataset.get_learnable_classes_strings())
                        util.pretty_print_per_class_statistics(
                            dataset.get_learnable_classes_strings()[:dataset.get_empty_class()],
                            true_positives, false_negatives, false_positives, ious)

                        avg_iou = np.mean(ious)
                        summary = tf.Summary()
                        summary.value.add(tag='avg_iou', simple_value=avg_iou)

                        # Add per-class IoUs to summary to be viewable in Tensorboard
                        for class_i, class_label in enumerate(
                                dataset.get_learnable_classes_strings()[:dataset.get_empty_class()]):
                            summary.value.add(tag=class_label + '_iou',
                                              simple_value=ious[class_i])
                        summary_writers[s].add_summary(summary, sample_i)
                        summary_writers[s].flush()

                        confusion_matrix_clear_op.eval()

    coord.request_stop()
    coord.join([loader_thread])
    print 'Training complete.'