def main():
    """Pre-train, train, and export node embeddings for one dataset.

    Pipeline: parse CLI arguments, load the graph dataset from
    ``./Database/<dataset>/``, pre-train the network/attribute parameters,
    train the full model, and write the inferred embeddings to
    ``./Log/<dataset>/train_emb.txt``.
    """
    dataset, learning_rate, epoch, alpha, beta, gamma, pretrain_epoch = parse_argument()
    # Fixed seed for reproducible runs.
    random.seed(9001)

    # Inputs live under ./Database/<dataset>/, outputs under ./Log/<dataset>/.
    data_dir = './Database/' + dataset
    log_dir = './Log/' + dataset
    # Both the pre-trainer (writer) and the model (reader) use this file.
    pretrain_params_path = log_dir + '/pretrain_params.pkl'

    dataset_config = {
        'feature_file': data_dir + '/features.txt',
        'graph_file': data_dir + '/edges.txt',
        'walks_file': data_dir + '/walks.txt',
        'label_file': data_dir + '/group.txt',
    }
    graph = Dataset(dataset_config)

    pretrain_config = {
        'net_shape': [6, 1],
        'att_shape': [1],
        'net_input_dim': graph.num_nodes,
        'att_input_dim': graph.num_feas,
        'pretrain_params_path': pretrain_params_path,
    }
    model_config = {
        'net_shape': [6, 1],
        'att_shape': [1],
        'net_input_dim': graph.num_nodes,
        'att_input_dim': graph.num_feas,
        'is_init': True,  # initialize the model from the pre-trained parameters
        'pretrain_params_path': pretrain_params_path,
    }
    trainer_config = {
        'net_shape': [6, 1],
        'att_shape': [1],
        'net_input_dim': graph.num_nodes,
        'att_input_dim': graph.num_feas,
        'drop_prob': 0.2,
        'learning_rate': learning_rate,
        'batch_size': 100,
        'num_epochs': epoch,
        'beta': beta,
        'alpha': alpha,
        'gamma': gamma,
        'model_path': log_dir + '/test_model.pkl',
    }

    print("=" * 30 + "开始进行参数预训练" + "=" * 30)
    pretrainer = PreTrainer(pretrain_config)
    # 'net' branch: graph.X — presumably the matrix built from walks.txt (TODO confirm)
    pretrainer.pretrain(graph.X, 'net', pretrain_epoch)
    # 'att' branch: graph.Z — node attribute matrix
    pretrainer.pretrain(graph.Z, 'att', pretrain_epoch)

    model = Model(model_config)
    trainer = Trainer(model, trainer_config)
    trainer.train(graph)

    # Infer embeddings for the whole graph and persist them as plain text.
    train_emb = np.array(trainer.infer(graph))
    with open(log_dir + "/train_emb.txt", 'w') as f:
        np.savetxt(f, train_emb)
ccsistent_loss, 'beta_W': beta_W, 'View_num': View_num, 'View': layers, 'dims': dims, 'drop_prob': 0.2, 'learning_rate': learning_rate, 'batch_size': 2000, 'num_epochs': 1000, 'model_path': './Log/' + dataset_name + '/' + dataset_name + '_model.pkl', } model = MVModel(model_config) trainer = Trainer(model, trainer_config) trainer.train(graph) acc, nmi = trainer.inferCluster(graph) result_single = 'ccsistent_loss={:.4f}:'.format( ccsistent_loss) + ' acc={:.4f}'.format( acc) + ' & ' + 'nmi={:.4f}'.format(nmi) f.write(result_single + '\n') f.flush()
test_mode=True) val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=0) print('num_train_files: ' + str(len(train_dataset.filepaths))) print('num_val_files: ' + str(len(val_dataset.filepaths))) trainer = Trainer(cnet, train_loader, val_loader, optimizer, nn.CrossEntropyLoss(), 'svcnn', log_dir, num_views=1) trainer.train(30) # STAGE 2 log_dir = args.name + '_stage_2' create_folder(log_dir) cnet_2 = MVCNN(args.name, cnet, nclasses=40, cnn_name=args.cnn_name, num_views=args.num_views) del cnet optimizer = optim.Adam(cnet_2.parameters(), lr=args.lr, weight_decay=args.weight_decay, betas=(0.9, 0.999))