def run_scene(value):
    """Run one solver scene with the rotor centred at [60, value].

    Starts a fresh Runner, pre-processes with the current process id,
    solves, and returns the post-processed y-force.
    """
    scene_runner = Runner()
    scene_runner.start()
    scene_runner.pre(
        process_id=mp.current_process(),
        rotor_center=[60, value],
    )
    scene_runner.solve()
    return scene_runner.post()
def __init__(
    self,
    display: Display,
    db: pymysql.Connection,
    available_gpu_ids: typing.List[int],
    temp_dir_root: str,
    repo_cache_dir: str,
    trash_dir_root: str,
    max_parallel: int,
    labels: typing.List[str],
    name: str = socket.gethostname(),
):
    """Wire up repositories and executor bookkeeping, then register this
    runner record and hook the top-page renderer.

    NOTE(review): the ``name`` default is evaluated once at import time,
    not per call — confirm that is intended.
    """
    self.display = display
    self.db = db
    self.repo = JobRepository(self.db)
    self.runner_repo = RunnerRepository(self.db)
    # Job.id -> executor currently running that job.
    self.active_executors: typing.Dict[int, WrapExecutor] = {}
    self.finished_executors_queue = queue.Queue()
    self.available_gpu_ids = set(available_gpu_ids)
    self.temp_dir_root = temp_dir_root
    self.trash_dir_root = trash_dir_root
    self.repo_cache_dir = repo_cache_dir
    self.max_parallel = max_parallel
    self.name = name
    self.labels = labels
    self.runner = Runner(
        name=self.name,
        gpu_ids=','.join(str(gpu) for gpu in self.available_gpu_ids),
        labels=','.join(labels),
        status=RunnerStatus.Running,
    )
    self.finish_flg = False
    self.finished_jobs = []
    # Route the display's top page through our renderer.
    self.display.render_toppage = self._render
def add_runner_to_database(email, password, salt):
    """Adds a runner to the database and returns the runner object"""
    new_runner = Runner(email=email, password=password, salt=salt)
    db.session.add(new_runner)
    db.session.commit()
    return new_runner
def create(self, runner: Runner):
    """Insert *runner* into the ``runners`` table and return the stored row.

    The incoming ``id`` is dropped so the database assigns one; both
    timestamps are stamped with the same instant.

    Fix: the original called ``datetime.now`` twice, so ``created_at``
    and ``updated_at`` could differ by microseconds on a fresh row.
    """
    with db_lock:
        runner_dict = runner._asdict()
        # One timestamp so created_at and updated_at match exactly.
        now = datetime.datetime.now(tz=self.tz).isoformat()
        runner_dict['created_at'] = now
        runner_dict['updated_at'] = now
        # Let the database assign the primary key.
        del runner_dict['id']
        columns = ', '.join(runner_dict.keys())
        placeholders = ', '.join(['%s'] * len(runner_dict))
        sql = 'INSERT INTO runners (' + columns + ') VALUES (' + placeholders + ')'
        with self.db.cursor() as cur:
            cur.execute(sql, list(runner_dict.values()))
            cur.execute(
                'SELECT * from runners WHERE id = LAST_INSERT_ID() LIMIT 1'
            )
            result = cur.fetchone()
            return Runner(**result)
def _update(self, id: int, **kwargs):
    """Update row *id* with the subset of *kwargs* that matches Runner's
    fields (same attribute name and same value type), stamping updated_at."""
    default_runner = Runner()
    # Keep only keys present on Runner whose value type matches the default's.
    fields = {
        key: value
        for key, value in kwargs.items()
        if hasattr(default_runner, key)
        and type(value) is type(getattr(default_runner, key))
    }
    fields['updated_at'] = datetime.datetime.now(tz=self.tz).isoformat()
    assignments = ', '.join(key + '= %s' for key in fields)
    sql = 'UPDATE runners set ' + assignments + ' WHERE id = %s'
    with self.db.cursor() as cur:
        cur.execute(sql, list(fields.values()) + [id])
def main(checkpoint, **args):
    # Generation entry point: load a SampleRNN checkpoint, rebuild the model,
    # and run the generator plugin, optionally syncing with a GCS bucket.
    task_id = setup_logging(
        'gen',
        logging.NOTSET if args.get('debug', False) else logging.INFO)
    # Defaults below are overridden by anything passed in **args
    # (dict(mapping, **kwargs) lets kwargs win on key collisions).
    params = dict(
        {
            'n_rnn': 3,
            'dim': 1024,
            'learn_h0': False,
            'q_levels': 256,
            'weight_norm': True,
            'frame_sizes': [16, 16, 4],
            'sample_rate': 16000,
            'n_samples': 1,
            'sample_length': 16000 * 60 * 4,
            'sampling_temperature': 1,
            'q_method': QMethod.LINEAR,
        },
        exp=checkpoint,
        **args)
    logging.info(str(params))
    logging.info('booting')

    # dataset = storage_client.list_blobs(bucket, prefix=path)
    # for blob in dataset:
    #     blob.download_to_filename(blob.name)
    bucket = None
    if args['bucket']:
        # Optional Google Cloud Storage integration: pull the checkpoint
        # down first, and later upload generated samples (see upload()).
        logging.debug('setup google storage bucket {}'.format(args['bucket']))
        storage_client = storage.Client()
        bucket = Bucket(storage_client, args['bucket'])
        preload_checkpoint(checkpoint, storage_client, bucket)

    # Results go to a per-task directory two levels above the checkpoint file.
    results_path = os.path.abspath(
        os.path.join(checkpoint, os.pardir, os.pardir, task_id))
    ensure_dir_exists(results_path)

    checkpoint = os.path.abspath(checkpoint)
    # NOTE: the lambda's `storage` parameter shadows the google.cloud
    # `storage` module here — it is torch's serialization storage object.
    tmp_pretrained_state = torch.load(
        checkpoint,
        map_location=lambda storage, loc: storage.cuda(0)
        if args['cuda'] else storage)

    # Load all tensors onto GPU 1
    # torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))
    pretrained_state = OrderedDict()

    for k, v in tmp_pretrained_state.items():
        # Delete "model." from key names since loading the checkpoint
        # automatically attaches it
        layer_name = k.replace("model.", "")
        pretrained_state[layer_name] = v
        # print("k: {}, layer_name: {}, v: {}".format(k, layer_name, np.shape(v)))

    # Create model with same parameters as used in training
    model = SampleRNN(frame_sizes=params['frame_sizes'],
                      n_rnn=params['n_rnn'],
                      dim=params['dim'],
                      learn_h0=params['learn_h0'],
                      q_levels=params['q_levels'],
                      weight_norm=params['weight_norm'])
    if params['cuda']:
        model = model.cuda()

    # Load pretrained model
    model.load_state_dict(pretrained_state)

    def upload(file_path):
        # No-op unless a bucket was configured above.
        if bucket is None:
            return
        # remove prefix /app
        name = file_path.replace(os.path.abspath(os.curdir) + '/', '')
        blob = Blob(name, bucket)
        logging.info('uploading {}'.format(name))
        blob.upload_from_filename(file_path)

    (_, dequantize) = quantizer(params['q_method'])
    gen = Gen(Runner(model), params['cuda'])
    gen.register_plugin(
        GeneratorPlugin(results_path, params['n_samples'],
                        params['sample_length'], params['sample_rate'],
                        params['q_levels'], dequantize,
                        params['sampling_temperature'], upload))
    gen.run()
def find_runner_by_intania(cls, intania):
    """Return all challenge runners whose document matches *intania*.

    NOTE(review): cls.find_runner may already return Runner objects
    (see the sibling classmethod) — confirm from_doc is idempotent or
    that this class's find_runner returns raw documents.
    """
    docs = cls.find_runner({"intania": intania})
    return [Runner.from_doc(doc) for doc in docs]
def find_runner(cls, query=None):
    """Return all challenge runners matching *query* (all runners if omitted).

    Fix: the original used a mutable default argument (``query={}``);
    a ``None`` sentinel with the same effective behavior replaces it.
    """
    runner_cursor = cls.DB.challenge_runners.find({} if query is None else query)
    return [Runner.from_doc(runner_doc) for runner_doc in runner_cursor]
def find_one_runner(cls, query=None):
    """Return the first challenge runner matching *query* via Runner.from_doc.

    Fix: the original used a mutable default argument (``query={}``);
    a ``None`` sentinel with the same effective behavior replaces it.

    NOTE(review): find_one returns None when nothing matches — confirm
    Runner.from_doc tolerates a None document.
    """
    runner_doc = cls.DB.challenge_runners.find_one({} if query is None else query)
    return Runner.from_doc(runner_doc)
def get(self, id: int) -> Optional[Runner]:
    """Fetch the runner row with primary key *id*, or None if absent.

    Fixes: the original raised TypeError (``Runner(**None)``) when no row
    matched, despite the Optional[Runner] annotation, and passed a bare
    int instead of a parameter sequence to cursor.execute.
    """
    with db_lock:
        with self.db.cursor() as cur:
            # DB-API expects query parameters as a sequence.
            cur.execute('SELECT * from runners WHERE id = %s LIMIT 1', (id,))
            row = cur.fetchone()
    # Honor the Optional[Runner] contract on a missing row.
    return Runner(**row) if row is not None else None