def main():
    if not os.path.exists('models'):
        os.makedirs('models')

    # get train / test dataloaders
    train_df, test_df = get_train_test()
    train_dataset = DeepFashionInShopDataset(train_df, 'RANDOM')
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=4)
    test_dataset = DeepFashionInShopDataset(test_df, 'CENTER')
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=4)

    # get network
    print("Creating model: {}".format(const.net_name))
    print("Batch size: {}; Model learning rate: {}; Center learning rate: {}; Total epochs: {}"
          .format(args.batch_size, args.lr_model, args.lr_cent, args.epoch))
    net = const.USE_NET(const.NUM_CLASSES)
    net = net.to(const.device)  # move to CPU/GPU

    # losses (cross-entropy, center loss, perceptual loss) and their optimizers
    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=const.NUM_CLASSES,
                                feat_dim=const.FEATURE_EMBEDDING,
                                use_gpu=True)
    criterion_pcent = VGG_perceptual_loss_16()
    optimizer_model = torch.optim.SGD(net.parameters(), lr=args.lr_model,
                                      weight_decay=5e-04, momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)
    optimizer_ploss = torch.optim.SGD(criterion_pcent.parameters(), lr=args.lr_ploss)

    # write to tensorboardX
    writer = SummaryWriter(const.TRAIN_DIR)
    scheduler = lr_scheduler.StepLR(optimizer_model,
                                    step_size=const.STEP_SIZE,
                                    gamma=const.LEARNING_RATE_DECAY)

    step = 0
    for epoch in range(args.epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.epoch))
        train(net, criterion_xent, criterion_cent, criterion_pcent,
              optimizer_model, optimizer_centloss, optimizer_ploss,
              train_dataloader, const.NUM_CLASSES, epoch, writer, step)
        test(net, test_dataloader, const.NUM_CLASSES, epoch, writer, step)
        scheduler.step()  # decay the model learning rate on schedule
        step += 1

    print('Saving Model....')
    torch.save(net.state_dict(), 'models/' + const.MODEL_NAME)
    print('Finished')
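
# --- Hypothetical sketch, not the repository's train() function. It shows how
# the cross-entropy and center-loss criteria and their two optimizers are
# commonly combined for one batch. The center-loss weight `alpha_cent` and the
# assumption that the network returns (features, logits) are assumptions; the
# perceptual-loss term is omitted because its inputs are not visible in this
# excerpt. ---
def train_step(net, criterion_xent, criterion_cent,
               optimizer_model, optimizer_centloss,
               images, labels, alpha_cent=1.0):
    features, logits = net(images)
    loss_xent = criterion_xent(logits, labels)
    loss_cent = criterion_cent(features, labels)
    loss = loss_xent + alpha_cent * loss_cent

    optimizer_model.zero_grad()
    optimizer_centloss.zero_grad()
    loss.backward()
    optimizer_model.step()

    # rescale the center gradients so the weight alpha_cent does not also
    # shrink the learning rate used for the class centers
    for param in criterion_cent.parameters():
        param.grad.data *= (1. / alpha_cent)
    optimizer_centloss.step()

    return loss.item()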
train_dataset = DeepFashionCAPDataset(train_df,
                                      mode=const.DATASET_PROC_METHOD_TRAIN)
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=const.BATCH_SIZE,
                                               shuffle=True,
                                               num_workers=4)
val_df = df[df['evaluation_status'] == 'test']
val_dataset = DeepFashionCAPDataset(val_df,
                                    mode=const.DATASET_PROC_METHOD_VAL)
val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=const.VAL_BATCH_SIZE,
                                             shuffle=False,
                                             num_workers=4)
val_step = len(val_dataloader)

net = const.USE_NET()
net = net.to(const.device)

learning_rate = const.LEARNING_RATE
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
writer = SummaryWriter(const.TRAIN_DIR)

total_step = len(train_dataloader)
step = 0
for epoch in range(const.NUM_EPOCH):
    net.train()
    for i, sample in enumerate(train_dataloader):
        step += 1
        # move every tensor in the sample dict to CPU/GPU
        for key in sample:
            sample[key] = sample[key].to(const.device)
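        # --- Hypothetical continuation, not part of the excerpt above. A
        # typical step would run the forward pass, compute a scalar loss,
        # backpropagate, and log to TensorBoard; `compute_loss` is an assumed
        # helper, not a function defined in this repository. ---
        output = net(sample)
        loss = compute_loss(output, sample)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 100 == 0:
            writer.add_scalar('train/loss', loss.item(), step)
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, const.NUM_EPOCH, i + 1, total_step, loss.item()))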
df = pd.read_csv(const.base_path + const.USE_CSV)
inf_df = df
inf_dataset = DeepFashionCAPDataset(inf_df,
                                    random_state=random_state,
                                    mode=const.DATASET_PROC_METHOD_INF,
                                    base_path=const.base_path)
inf_dataloader = torch.utils.data.DataLoader(inf_dataset,
                                             batch_size=1,  # const.INF_BATCH_SIZE
                                             shuffle=False,
                                             num_workers=6)
inf_step = len(inf_dataloader)

net = const.USE_NET(const.USE_IORN)
net = net.to(const.device)
net.load_state_dict(torch.load(const.INIT_MODEL), strict=False)
writer = SummaryWriter(const.INF_DIR)

# read attribute names from the DeepFashion annotation file
with open('/home/thomasz/SA-FL/data/AttributePrediction/Anno/list_attr_cloth.txt') as f:
    ret = []
    f.readline()  # first line: number of attributes
    f.readline()  # second line: column header
    for line in f:
        line = line.split(' ')
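        # --- Hypothetical continuation, not part of the excerpt above. In
        # list_attr_cloth.txt each remaining line holds an attribute name,
        # padded with spaces, followed by an attribute-type index, so one way
        # to recover just the names is: ---
        tokens = [t for t in line if t.strip()]
        attr_name = ' '.join(tokens[:-1])
        ret.append(attr_name)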
train_dataset = DeepFashionCAPDataset(train_df,
                                      mode=const.DATASET_PROC_METHOD_TRAIN)
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=const.BATCH_SIZE,
                                               shuffle=True,
                                               num_workers=4)
val_df = df[df['evaluation_status'] == 'test']
val_dataset = DeepFashionCAPDataset(val_df,
                                    mode=const.DATASET_PROC_METHOD_VAL)
val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=const.VAL_BATCH_SIZE,
                                             shuffle=False,
                                             num_workers=4)
val_step = len(val_dataloader)

net = const.USE_NET()
net = net.to(const.device)

learning_rate = const.LEARNING_RATE
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
writer = SummaryWriter(const.TRAIN_DIR)

total_step = len(train_dataloader)
step = 0
for epoch in range(const.NUM_EPOCH):
    print("starting epochs")
    net.train()
    for i, sample in enumerate(train_dataloader):
        step += 1
        for key in sample:
            sample[key] = sample[key].to(const.device)
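        # --- training step continues beyond this excerpt, as sketched earlier ---

    # --- Hypothetical per-epoch validation pass using the val_dataloader
    # created above; `evaluate_batch` is an assumed helper returning a scalar
    # metric for one batch, not a function from this repository. ---
    net.eval()
    with torch.no_grad():
        val_metric = 0.0
        for sample in val_dataloader:
            for key in sample:
                sample[key] = sample[key].to(const.device)
            val_metric += evaluate_batch(net, sample)
    writer.add_scalar('val/metric', val_metric / val_step, step)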