def main():
    """Entry point for training the Seq2SeqVAD dialogue model.

    Loads configuration, vocabulary and pre-trained embeddings, builds the
    model (tracking GPU memory around construction), prepares the batch
    iterators and report objects, creates the output directory tree, and
    hands everything to ``train.train_model``.

    Relies on project modules being importable at module level:
    ``configuration``, ``Vocabulary``, ``seq2seqVAD``, ``dataIter``,
    ``report``, ``train``, ``MemTracker``.
    """
    config = configuration.Configuration.load("config.json")

    # Log to a file when one is configured, otherwise to the console.
    if config.logging_file_name is not None:
        logging.basicConfig(level=logging.INFO, format="%(message)s",
                            filename=config.logging_file_name, filemode="w")
    else:
        logging.basicConfig(level=logging.INFO, format="%(message)s")

    vocab = Vocabulary.load_vocabulary("./data/weibo_vocab_word.pkl")
    config.common["num_word"] = vocab.truncated_length

    # Track GPU memory allocated by model construction.
    frame = inspect.currentframe()
    gpu_tracker = MemTracker(frame)
    gpu_tracker.track()
    model = seq2seqVAD.Seq2SeqVAD(config)
    gpu_tracker.track()

    # NOTE(review): pickle.load on a local artifact — do not point this at
    # untrusted files.
    with open("./data/weibo_embedding_word.pkl", "rb") as f:
        embedding = pickle.load(f)
    model.init_weight(embedding)

    logging.info(repr(config))
    logging.info(repr(model))

    batch_size = config.learning["batch_size"]
    train_batch_generator = dataIter.BatchIter("data/weibo_train_word.pkl", batch_size)
    # NOTE(review): validation and test both read weibo_test_word.pkl —
    # presumably intentional (no separate dev split); confirm.
    valid_batch_generator = dataIter.BatchIter("data/weibo_test_word.pkl", batch_size)
    test_batch_generator = dataIter.BatchIter("data/weibo_test_word.pkl", 10)

    train_report_names = [
        "ce", "bwd_rnn_kld", "aux_bow", "bwd_ce", "loss",
        "KLD_weight", "aux_weight", "bwd_ce_weight",
    ]
    train_report = report.Report(train_report_names)
    valid_report_names = ["ce"]
    generation_report_names = [
        "emb_avg", "emb_ext", "emb_gre", "dist-1", "dist-2", "novel",
    ]
    generation_report = report.Report(generation_report_names)
    valid_report = report.Report(valid_report_names)

    parent_name = config.learning["parent_name"]
    # FIX: the original `if not exists(parent_name): os.mkdir(...)` chain
    # skipped the subdirectories entirely whenever the parent already
    # existed (e.g. after an interrupted run), so later writes into them
    # would fail.  makedirs(..., exist_ok=True) creates whatever part of
    # the tree is missing and is a no-op for what already exists.
    for sub in ("report", "stochastic_array", "saved_models",
                "generated_dialogues"):
        os.makedirs(join(parent_name, sub), exist_ok=True)

    train.train_model(model, config,
                      train_batch_generator=train_batch_generator,
                      valid_batch_generator=valid_batch_generator,
                      test_batch_generator=test_batch_generator,
                      vocab=vocab,
                      embedding=embedding,
                      report_train=train_report,
                      report_valid=valid_report,
                      report_generation=generation_report,
                      parent_file_name=parent_name)
rpn_anchor_generator=rpn_anchor_generator)  # closes the detection-model constructor begun above

# Train/test splits of the cervical-carcinoma ROI dataset from a local drive.
data_train = Positive_Roi_Dataset('E:/ali_cervical_carcinoma_data', train=True)
data_test = Positive_Roi_Dataset('E:/ali_cervical_carcinoma_data', train=False)
# print('data_test num=', len(data_test), '\nfileds:\n', data_test[0][1])

# batch_size=1 with the project's collate_fn — detection targets vary per image.
trainLoader = data.DataLoader(data_train, batch_size=1, shuffle=True,
                              collate_fn=utils.collate_fn)
testLoader = data.DataLoader(data_test, batch_size=1, shuffle=False,
                             collate_fn=utils.collate_fn)

torch.cuda.empty_cache()  # observe GPU memory (free cached allocations first)
device = torch.device('cuda')

# GPU-memory tracking around the model-to-device transfer.
frame = inspect.currentframe()  # define a frame to track
gpu_tracker = MemTracker(frame)  # define a GPU tracker
gpu_tracker.track()  # run function between the code lines that use the GPU
model.to(device)
# gpu_tracker.track() # run function between the code lines that use the GPU

# Standard detection-training optimizer setup: SGD over trainable params only,
# with a step decay of the learning rate every 3 epochs.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.0005)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
# Tail of the AlexNet classifier head (the earlier layers were appended above).
AlexNet_layers.append(nn.Linear(4096, 4096))
AlexNet_layers.append(nn.ReLU())
AlexNet_layers.append(nn.Dropout(0.5))
AlexNet_layers.append(nn.Linear(4096, 1000))

# Layer indices at which to cut off "stashing"; presumably layer boundaries
# of interest in the AlexNet stack — TODO confirm against the layer list.
Stash_List = [3,6,8,10,14,17,20]

# Move every layer except the reshape/`view` placeholder to the GPU.
# NOTE(review): rebinding the loop variable only works here because
# nn.Module.cuda() moves the module in place and returns self — confirm
# `view` is the lone non-module entry.
for i in AlexNet_layers:
    if i != view:
        i = i.cuda()
x = inp.cuda()

# GPU-memory tracking around the forward passes below.
frame = inspect.currentframe()  # define a frame to track
gpu_tracker = MemTracker(frame)  # define a GPU tracker
gpu_tracker.track()
# forward()
#import time

# For each cutoff j: run a forward pass, marking layers before index j as
# "stashed" (stash=True) and the rest as not.
for j in Stash_List:
    print("Stash first %s Layers:"%(Stash_List.index(j)))
    x = inp.cuda()  # fresh input for every configuration
    #begin = time.time()
    for i in range(len(AlexNet_layers)):
        # y = x
        if i < j :
            stash = True
        else:
            stash = False