def _parseConfig(self, config_path):
    def toList(item):
        if not item:
            return []
        if isinstance(item, list):
            return item
        return [item]

    if config_path:
        config_path = os.path.expanduser(config_path)
        if not os.path.exists(config_path):
            raise Exception("Unable to read layout config file at %s" %
                            config_path)
    config_file = open(config_path)
    data = yaml.load(config_file)

    self._config_env = {}
    for include in data.get('includes', []):
        if 'python-file' in include:
            fn = include['python-file']
            if not os.path.isabs(fn):
                base = os.path.dirname(config_path)
                fn = os.path.join(base, fn)
            fn = os.path.expanduser(fn)
            execfile(fn, self._config_env)

    for conf_pipeline in data.get('pipelines', []):
        pipeline = Pipeline(conf_pipeline['name'])
        manager = globals()[conf_pipeline['manager']](self, pipeline)
        pipeline.setManager(manager)

        self.pipelines[conf_pipeline['name']] = pipeline
        manager.success_action = conf_pipeline.get('success')
        manager.failure_action = conf_pipeline.get('failure')
        manager.start_action = conf_pipeline.get('start')
        for trigger in toList(conf_pipeline['trigger']):
            approvals = {}
            for approval_dict in toList(trigger.get('approval')):
                for k, v in approval_dict.items():
                    approvals[k] = v
            f = EventFilter(types=toList(trigger['event']),
                            branches=toList(trigger.get('branch')),
                            refs=toList(trigger.get('ref')),
                            approvals=approvals,
                            comment_filters=toList(
                                trigger.get('comment_filter')))
            manager.event_filters.append(f)

    for config_job in data['jobs']:
        job = self.getJob(config_job['name'])
        # Be careful to only set attributes explicitly present on
        # this job, to avoid squashing attributes set by a meta-job.
        m = config_job.get('failure-message', None)
        if m:
            job.failure_message = m
        m = config_job.get('success-message', None)
        if m:
            job.success_message = m
        m = config_job.get('hold-following-changes', False)
        if m:
            job.hold_following_changes = True
        fname = config_job.get('parameter-function', None)
        if fname:
            func = self._config_env.get(fname, None)
            if not func:
                raise Exception("Unable to find function %s" % fname)
            job.parameter_function = func
        branches = toList(config_job.get('branch'))
        if branches:
            job._branches = branches
            job.branches = [re.compile(x) for x in branches]

    def add_jobs(job_tree, config_jobs):
        for job in config_jobs:
            if isinstance(job, list):
                for x in job:
                    add_jobs(job_tree, x)
            if isinstance(job, dict):
                for parent, children in job.items():
                    parent_tree = job_tree.addJob(self.getJob(parent))
                    add_jobs(parent_tree, children)
            if isinstance(job, str):
                job_tree.addJob(self.getJob(job))

    for config_project in data['projects']:
        project = Project(config_project['name'])
        self.projects[config_project['name']] = project
        for pipeline in self.pipelines.values():
            if pipeline.name in config_project:
                job_tree = pipeline.addProject(project)
                config_jobs = config_project[pipeline.name]
                add_jobs(job_tree, config_jobs)

    # All jobs should be defined at this point, get rid of
    # metajobs so that getJob isn't doing anything weird.
    self.metajobs = {}

    # TODO(jeblair): check that we don't end up with jobs like
    # "foo - bar" because a ':' is missing in the yaml for a dependent job
    for pipeline in self.pipelines.values():
        pipeline.manager._postConfig()
def _parseConfig(self, config_path):
    def toList(item):
        if not item:
            return []
        if isinstance(item, list):
            return item
        return [item]

    if config_path:
        config_path = os.path.expanduser(config_path)
        if not os.path.exists(config_path):
            raise Exception("Unable to read layout config file at %s" %
                            config_path)
    config_file = open(config_path)
    data = yaml.load(config_file)

    validator = layoutvalidator.LayoutValidator()
    validator.validate(data)

    self._config_env = {}
    for include in data.get('includes', []):
        if 'python-file' in include:
            fn = include['python-file']
            if not os.path.isabs(fn):
                base = os.path.dirname(config_path)
                fn = os.path.join(base, fn)
            fn = os.path.expanduser(fn)
            execfile(fn, self._config_env)

    for conf_pipeline in data.get('pipelines', []):
        pipeline = Pipeline(conf_pipeline['name'])
        pipeline.description = conf_pipeline.get('description')
        pipeline.failure_message = conf_pipeline.get('failure-message',
                                                     "Build failed.")
        pipeline.success_message = conf_pipeline.get('success-message',
                                                     "Build succeeded.")
        pipeline.dequeue_on_new_patchset = conf_pipeline.get(
            'dequeue-on-new-patchset', True)
        manager = globals()[conf_pipeline['manager']](self, pipeline)
        pipeline.setManager(manager)

        self.pipelines[conf_pipeline['name']] = pipeline
        manager.success_action = conf_pipeline.get('success')
        manager.failure_action = conf_pipeline.get('failure')
        manager.start_action = conf_pipeline.get('start')
        for trigger in toList(conf_pipeline['trigger']):
            approvals = {}
            for approval_dict in toList(trigger.get('approval')):
                for k, v in approval_dict.items():
                    approvals[k] = v
            f = EventFilter(types=toList(trigger['event']),
                            branches=toList(trigger.get('branch')),
                            refs=toList(trigger.get('ref')),
                            approvals=approvals,
                            comment_filters=toList(
                                trigger.get('comment_filter')),
                            email_filters=toList(
                                trigger.get('email_filter')))
            manager.event_filters.append(f)

    for config_job in data.get('jobs', []):
        job = self.getJob(config_job['name'])
        # Be careful to only set attributes explicitly present on
        # this job, to avoid squashing attributes set by a meta-job.
        m = config_job.get('failure-message', None)
        if m:
            job.failure_message = m
        m = config_job.get('success-message', None)
        if m:
            job.success_message = m
        m = config_job.get('failure-pattern', None)
        if m:
            job.failure_pattern = m
        m = config_job.get('success-pattern', None)
        if m:
            job.success_pattern = m
        m = config_job.get('hold-following-changes', False)
        if m:
            job.hold_following_changes = True
        m = config_job.get('voting', None)
        if m is not None:
            job.voting = m
        fname = config_job.get('parameter-function', None)
        if fname:
            func = self._config_env.get(fname, None)
            if not func:
                raise Exception("Unable to find function %s" % fname)
            job.parameter_function = func
        branches = toList(config_job.get('branch'))
        if branches:
            job._branches = branches
            job.branches = [re.compile(x) for x in branches]

    def add_jobs(job_tree, config_jobs):
        for job in config_jobs:
            if isinstance(job, list):
                for x in job:
                    add_jobs(job_tree, x)
            if isinstance(job, dict):
                for parent, children in job.items():
                    parent_tree = job_tree.addJob(self.getJob(parent))
                    add_jobs(parent_tree, children)
            if isinstance(job, str):
                job_tree.addJob(self.getJob(job))

    for config_project in data.get('projects', []):
        project = Project(config_project['name'])
        self.projects[config_project['name']] = project
        mode = config_project.get('merge-mode')
        if mode and mode == 'cherry-pick':
            project.merge_mode = model.CHERRY_PICK
        for pipeline in self.pipelines.values():
            if pipeline.name in config_project:
                job_tree = pipeline.addProject(project)
                config_jobs = config_project[pipeline.name]
                add_jobs(job_tree, config_jobs)

    # All jobs should be defined at this point, get rid of
    # metajobs so that getJob isn't doing anything weird.
    self.metajobs = {}

    for pipeline in self.pipelines.values():
        pipeline.manager._postConfig()
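# For reference, a minimal layout file that the parser above would accept,
# sketched from the keys it reads. This is an illustrative example, not a
# canonical one: the manager class name must match a class in this module's
# globals(), and the job/project names and report values below are
# hypothetical placeholders.
#
#   pipelines:
#     - name: check
#       manager: IndependentPipelineManager   # assumed manager class name
#       trigger:
#         - event: patchset-created           # any event type string
#           branch: ^master$
#       success:
#         verified: 1
#       failure:
#         verified: -1
#
#   jobs:
#     - name: example-unit-tests              # hypothetical job
#       branch: ^stable/.*$
#       voting: false
#
#   projects:
#     - name: example/project                 # hypothetical project
#       check:
#         - example-unit-tests
#         - example-base-job:                 # dict form: children run
#             - example-dependent-job         # after the parent succeeds
#
# The nested dict form in the project's job list is what add_jobs() expands
# into a parent/child job tree.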
import torch  # needed for torch.optim / torch.utils.data below; missing in the original
import lpips
from model import Pipeline
from dataset import Dataset
from utils import masked_l1
import wandb
from tqdm import tqdm

torch.autograd.set_detect_anomaly(True)

device = "cuda" if torch.cuda.is_available() else "cpu"

dataset = Dataset(num_parts=24)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)

model = Pipeline(H=512, W=512, num_features=16, num_parts=24)
model.to(device)
# model.load_state_dict(torch.load("tmp.pth"))
model.train()

learning_rate = 1e-3
optimizer = torch.optim.Adam([
    # Apply increasing amount of regularization to finer layers
    {
        "params": model.atlas.layer1,
        'weight_decay': 1e-2,
        'lr': learning_rate
    },
    {
        "params": model.atlas.layer2,
        'weight_decay': 1e-3,