def get_model(args):
    """Build the network named by ``args.arch`` and move it to the GPU.

    Supported values of ``args.arch``: ``'wide-resnet'``, ``'mlp'``,
    ``'resnet18'``, ``'resnet34'``, ``'resnet50'``, ``'vgg16'``.

    Args:
        args: parsed command-line namespace; which extra fields are read
            depends on the architecture (``wrn_*`` for wide-resnet,
            ``mlp_spec`` for mlp, ``num_classes`` for all).

    Returns:
        The constructed model, moved to CUDA.

    Raises:
        ValueError: if ``args.arch`` is not a supported architecture.
            (Previously an unknown arch left ``model`` unbound and the
            function died later with ``UnboundLocalError``.)
    """
    if args.arch == 'wide-resnet':
        model = model_wideresnet.WideResNet(args.wrn_depth, args.num_classes,
                                            args.wrn_widen_factor,
                                            drop_rate=args.wrn_droprate)
    elif args.arch == 'mlp':
        # Hidden widths come from a spec like "512x256"; prepend the
        # flattened 32x32x3 input dim and append the class-count output.
        n_units = [int(x) for x in args.mlp_spec.split('x')]  # hidden dims
        n_units.append(args.num_classes)  # output dim
        n_units.insert(0, 32 * 32 * 3)  # input dim
        model = model_mlp.MLP(n_units)
    elif args.arch == 'resnet18':
        model = model_resnet.resnet18(args.num_classes)
    elif args.arch == 'resnet34':
        model = model_resnet.resnet34(args.num_classes)
    elif args.arch == 'resnet50':
        model = model_resnet.resnet50(args.num_classes)
    elif args.arch == 'vgg16':
        model = model_vgg.vgg16(args.num_classes)
    else:
        # Fail fast instead of letting an unbound `model` blow up below.
        raise ValueError("unsupported architecture: %s" % args.arch)
    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()
    return model
# Hold out every app not in the NPB training set for validation.
apps_train = apps_npb
temp = list(set(libdata.apps) - set(apps_train))  # set(list(...)) was redundant
apps_validation = temp
print("validation apps: ", apps_validation)

train_start = time.time()

# One regressor per machine; all are trained in parallel by the pool.
# NOTE(review): loop boundaries reconstructed from mangled whitespace —
# `pool.map` is assumed to run once, after `totrain` is fully built. Confirm
# against the original file's indentation.
_MODEL_FACTORIES = {
    'xgb': model_xgb.XGBoost,
    'lr': model_lr.LR,
    'svr': model_svr.SVM,
    'gp': model_gp.GPR,
    'mlp': model_mlp.MLP,
}
for hi in machines:
    try:
        model = _MODEL_FACTORIES[ml_method]()
    except KeyError:
        # The original non-exclusive if-chain would silently reuse the
        # previous iteration's model (or raise NameError) for an unknown
        # method name — fail loudly instead.
        raise ValueError("unknown ml_method: %s" % ml_method)
    model.init(**params)
    totrain.append((model, hi, nTrain, dt, apps_train))

lmodels = pool.map(trainModel, totrain)
print("finish: %s\t%d" % (str(datetime.datetime.now()), eval_times))

# `models` keys are 1-based machine ids; pool results are positional.
for i, trained in enumerate(lmodels):
    models[i + 1] = trained
train_time += (time.time() - train_start)

# Shut down the training pool before evaluation; a fresh pool is created
# afterwards for the next round.
pool.close()
pool.join()

start = time.time()
#print(start)
res = evalAccuracy(apps_validation=apps_validation)
pred_time += (time.time() - start)

pool = mp.Pool(mp.cpu_count())
# test_resdf = testPop(nTests = NTESTS)