import numpy as np
import torch
from scipy.stats import spearmanr
from tensorboardX import SummaryWriter
from sklearn.metrics import accuracy_score

# Alternative model backbones kept for experiment switching; exactly one active.
# from models.u_model import NIMA
# from models.e_model import NIMA
# from models.relic_model import NIMA
from models.relic1_model import NIMA
# from models.relic2_model import NIMA
from dataset import AVADataset
from util import EDMLoss, AverageMeter
import option

# NOTE(review): hard-coded absolute path, opened at import time and never closed
# here — consider moving into main() under a `with` block or making it an option.
f = open('/data/mayme/git/AVA/checkpoint/log_test.txt', 'w')

opt = option.init()
opt.device = torch.device("cuda:{}".format(opt.gpu_id))


def adjust_learning_rate(params, optimizer, epoch):
    """Set the learning rate to the initial LR decayed by 10x every 10 epochs.

    Args:
        params: options object providing ``init_lr`` (initial learning rate).
        optimizer: torch optimizer whose param groups are updated in place.
        epoch: current (0-based) epoch number.
    """
    # Step decay: lr = init_lr * 0.1^(epoch // 10).
    lr = params.init_lr * (0.1 ** (epoch // 10))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def get_score(opt, y_pred):
    """Compute mean aesthetic scores from predicted 10-bin distributions.

    Args:
        opt: options object providing ``device``.
        y_pred: (batch, 10) tensor of per-image score distributions.
    """
    # Score bin centers 1..10, replicated across the batch for a weighted mean.
    w = torch.from_numpy(np.linspace(1, 10, 10))
    w = w.type(torch.FloatTensor)
    w = w.to(opt.device)
    w_batch = w.repeat(y_pred.size(0), 1)
    # NOTE(review): function is truncated at this chunk boundary; the remainder
    # (weighted sum / score extraction) continues beyond the visible source.
next(generator) # send state self._notify(state='running') # do while True: next(generator) except StopIteration: self._notify(state='end') def _load_worker(self, server, args): assert self._args.worker_path is not None spec = importlib.util.spec_from_file_location("__importlib_module__", self._args.worker_path) m = importlib.util.module_from_spec(spec) spec.loader.exec_module(m) g = m.main(server, args) return g def _notify(self, **kwargs): if self._manager is None: return self._manager.send(name=self._args.name, **kwargs) if __name__ == '__main__': import option option.init() Process(option.args, option.cluster)()