import torch
import torch.nn as nn
import torchvision
from torch import cuda
from torch.utils.data import DataLoader
from torchvision import transforms

device = torch.device('cuda' if cuda.is_available() else 'cpu')


def pretrain():
    # Pretrain the siamese fabric model on the X_fabric data.
    train = FabricDataset(root='./data/X_fabric_data/tr/')
    test = FabricDataset(root='./data/X_fabric_data/te/')
    train = DataLoader(train, batch_size=128, shuffle=True, num_workers=4)
    test = DataLoader(test, batch_size=128, shuffle=False, num_workers=4)  # no need to shuffle the test set
    MAX_EPOCHES = 20

    model = fabric()
    model = model.to(device)
    if cuda.is_available():
        model = torch.nn.DataParallel(model)
    opt1 = torch.optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()

    for epoch in range(MAX_EPOCHES):
        model.train()
        for xx, xt, Y in train:
            xx = xx.to(device)  # defect sample
            xt = xt.to(device)  # template image
            c = model(xx, xt)
            Y = Y.to(device)
            loss = criterion(c, Y)
            opt1.zero_grad()
            loss.backward()
            opt1.step()
            print(loss.item())

        # Evaluate accuracy on the held-out set.
        model.eval()
        with torch.no_grad():
            right = 0
            su = 0
            for xx, xt, Y in test:
                xx = xx.to(device)
                xt = xt.to(device)
                c = model(xx, xt)
                su = su + Y.shape[0]
                right = right + (c.argmax(dim=1) == Y.to(device)).float().sum().cpu().item()
            print('test', epoch, right / su)

    # Return the shared feature extractor for finetuning.
    if cuda.is_available():
        return model.module.feature
    else:
        return model.feature
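# --- Hedged sketch, not part of the original source: FabricDataset is defined
# elsewhere in this repo. From its usage above (`for xx, xt, Y in train`) it must
# yield (sample image, template image, label) triples; the on-disk layout below
# (one torch-saved record per pair) is purely an assumption for illustration.
import os
from torch.utils.data import Dataset


class FabricDatasetSketch(Dataset):
    """Hypothetical stand-in matching the (sample, template, label) interface."""

    def __init__(self, root):
        self.files = sorted(os.path.join(root, f)
                            for f in os.listdir(root) if f.endswith('.pt'))

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        record = torch.load(self.files[idx])
        # assumed record format: {'sample': Tensor, 'template': Tensor, 'label': int}
        return record['sample'], record['template'], record['label']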
def finetune(feature):
    # Finetune the pretrained backbone on the 4-class DIY_fabric data.
    # A fresh ResNet-18 supplies the 4-way linear classifier and the avgpool.
    xxx = torchvision.models.resnet18(num_classes=4)
    classifier = xxx.fc
    avgpool = xxx.avgpool

    train = FabricDataset(root='./data/DIY_fabric_data/tr/')
    test = FabricDataset(root='./data/DIY_fabric_data/te/')
    train = DataLoader(train, batch_size=128, shuffle=True, num_workers=4)
    test = DataLoader(test, batch_size=128, shuffle=False, num_workers=4)
    MAX_EPOCHES = 30

    feature = feature.to(device)
    classifier = classifier.to(device)
    if cuda.is_available():
        feature = torch.nn.DataParallel(feature)
        classifier = torch.nn.DataParallel(classifier)
    # Small lr for the pretrained backbone, larger lr for the fresh classifier.
    opt1 = torch.optim.Adam([
        {'params': feature.parameters(), 'lr': 0.0001},
        {'params': classifier.parameters(), 'lr': 0.01},
    ])
    criterion = nn.CrossEntropyLoss()

    for epoch in range(MAX_EPOCHES):
        feature.train()
        classifier.train()
        for xx, xt, Y in train:
            xx = xx.to(device)
            xt = xt.to(device)
            y = feature(xx)
            ytemp = feature(xt)
            y = avgpool(y)
            ytemp = avgpool(ytemp)
            # Classify the difference between sample and template features.
            c = classifier(torch.flatten(y - ytemp, 1))
            # Remap the raw defect labels {1, 2, 5, 13} to class indices {0, 1, 2, 3}.
            Y[Y == 1] = 0
            Y[Y == 2] = 1
            Y[Y == 5] = 2
            Y[Y == 13] = 3
            Y = Y.to(device)
            loss = criterion(c, Y)
            opt1.zero_grad()
            loss.backward()
            opt1.step()
            print(loss.item())

        feature.eval()
        classifier.eval()
        with torch.no_grad():
            right = 0
            su = 0
            for xx, xt, Y in test:
                xx = xx.to(device)
                xt = xt.to(device)
                y = feature(xx)
                ytemp = feature(xt)
                y = avgpool(y)
                ytemp = avgpool(ytemp)
                c = classifier(torch.flatten(y - ytemp, 1))
                su = su + Y.shape[0]
                Y[Y == 1] = 0
                Y[Y == 2] = 1
                Y[Y == 5] = 2
                Y[Y == 13] = 3
                right = right + (c.argmax(dim=1) == Y.to(device)).float().sum().cpu().item()
            print('test', epoch, right / su)
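# Hedged usage sketch: the intended flow appears to be pretraining on the
# X_fabric data, then reusing the returned backbone for the 4-class
# DIY_fabric task:
#
#     backbone = pretrain()
#     finetune(backbone)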
class fabric(nn.Module):
    # NOTE: the class head was reconstructed from context; the original source is
    # truncated here. The 512 * 7 * 7 classifier input matches ResNet-18's
    # pre-avgpool feature map for 224 x 224 inputs, so the backbone below is an
    # assumption consistent with that size.
    def __init__(self):
        super().__init__()
        resnet = torchvision.models.resnet18()
        self.pretrained = nn.Sequential(*list(resnet.children())[:-2])  # conv trunk, assumed
        self.fc = nn.Linear(512 * 7 * 7, 4)

    def forward(self, xx, xt):
        y = self.pretrained(xx)
        ytemp = self.pretrained(xt)
        # Classify the difference between sample and template features.
        c = self.fc(torch.flatten(y - ytemp, 1))
        return c


if __name__ == "__main__":
    # NOTE: transform is defined but never passed to FabricDataset, so it has no effect.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train = FabricDataset(root='./data/DIY_fabric_data/tr/')
    test = FabricDataset(root='./data/DIY_fabric_data/te/')
    train = DataLoader(train, batch_size=128, shuffle=True, num_workers=4)
    test = DataLoader(test, batch_size=128, shuffle=False, num_workers=4)
    MAX_EPOCHES = 100

    model = fabric()
    model = model.to(device)
    if cuda.is_available():
        model = torch.nn.DataParallel(model)
    # The original source is truncated inside this commented-out optimizer:
    # opt1 = torch.optim.Adam([
    #     {'params': model.module.pretrained.parameters(), 'lr': 0.000001},
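    # Hedged sketch of how the truncated main block would plausibly continue,
    # following the same pattern as pretrain()/finetune() above; the second
    # parameter group and its lr are assumptions, not recovered from the source:
    #
    #     opt1 = torch.optim.Adam([
    #         {'params': model.module.pretrained.parameters(), 'lr': 0.000001},
    #         {'params': model.module.fc.parameters(), 'lr': 0.01},  # assumed
    #     ])
    #     criterion = nn.CrossEntropyLoss()
    #     for epoch in range(MAX_EPOCHES):
    #         for xx, xt, Y in train:
    #             ...train step as in pretrain()...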
def finetune(feature, f2):
    # Two-stage AlexNet variant: difference features from both backbone stages
    # are concatenated before classification.
    train = FabricDataset(root='./data/DIY_fabric_data/tr/')
    test = FabricDataset(root='./data/DIY_fabric_data/te/')
    train = DataLoader(train, batch_size=128, shuffle=True, num_workers=4)
    test = DataLoader(test, batch_size=128, shuffle=False, num_workers=4)
    MAX_EPOCHES = 30

    # AlexNet-style classifier head over the concatenated difference features
    # (256 * 6 * 6 from each stage = 256 * 12 * 6 in total).
    classifier = nn.Sequential(
        nn.Dropout(),
        nn.Linear(256 * 12 * 6, 4096),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(inplace=True),
        nn.Linear(4096, 4),
    )
    feature = feature.to(device)
    f2 = f2.to(device)
    classifier = classifier.to(device)
    if cuda.is_available():
        feature = torch.nn.DataParallel(feature)
        f2 = torch.nn.DataParallel(f2)
        classifier = torch.nn.DataParallel(classifier)
    # NOTE: f2's parameters are not passed to the optimizer, so f2 stays frozen.
    opt1 = torch.optim.Adam([
        {'params': feature.parameters(), 'lr': 0.0001},
        {'params': classifier.parameters(), 'lr': 0.01},
    ])
    criterion = nn.CrossEntropyLoss()
    # AlexNet's avgpool is AdaptiveAvgPool2d((6, 6)).
    avgpool = torchvision.models.AlexNet(num_classes=4).avgpool

    for epoch in range(MAX_EPOCHES):
        feature.train()
        f2.train()
        classifier.train()
        for xx, xt, Y in train:
            xx = xx.to(device)
            xt = xt.to(device)
            y1 = feature(xx)
            ytemp1 = feature(xt)
            y2 = f2(y1)
            ytemp2 = f2(ytemp1)
            y1 = avgpool(y1)
            ytemp1 = avgpool(ytemp1)
            y2 = avgpool(y2)
            ytemp2 = avgpool(ytemp2)
            c = classifier(
                torch.cat((torch.flatten(y1 - ytemp1, 1),
                           torch.flatten(y2 - ytemp2, 1)), 1))
            # Remap the raw defect labels {1, 2, 5, 13} to class indices {0, 1, 2, 3}.
            Y[Y == 1] = 0
            Y[Y == 2] = 1
            Y[Y == 5] = 2
            Y[Y == 13] = 3
            Y = Y.to(device)
            loss = criterion(c, Y)
            opt1.zero_grad()
            loss.backward()
            opt1.step()
            print(loss.item())

        feature.eval()
        f2.eval()
        classifier.eval()
        with torch.no_grad():
            right = 0
            su = 0
            for xx, xt, Y in test:
                xx = xx.to(device)
                xt = xt.to(device)
                y1 = feature(xx)
                ytemp1 = feature(xt)
                y2 = f2(y1)
                ytemp2 = f2(ytemp1)
                y1 = avgpool(y1)
                ytemp1 = avgpool(ytemp1)
                y2 = avgpool(y2)
                ytemp2 = avgpool(ytemp2)
                c = classifier(
                    torch.cat((torch.flatten(y1 - ytemp1, 1),
                               torch.flatten(y2 - ytemp2, 1)), 1))
                su = su + Y.shape[0]
                Y[Y == 1] = 0
                Y[Y == 2] = 1
                Y[Y == 5] = 2
                Y[Y == 13] = 3
                right = right + (c.argmax(dim=1) == Y.to(device)).float().sum().cpu().item()
            print('test', epoch, right / su)
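def build_alexnet_stages():
    """Hedged sketch, not from the original source: finetune(feature, f2) expects
    two backbone stages whose pooled difference features concatenate to
    256 * 12 * 6 = 18432 classifier inputs, i.e. both stages must end in 256
    channels. One split of torchvision's AlexNet consistent with that is shown
    below; the exact split point is an assumption."""
    alex = torchvision.models.alexnet()
    feature = alex.features[:10]  # through Conv2d(384, 256) + ReLU -> 256 x 13 x 13
    f2 = alex.features[10:]       # Conv2d(256, 256) + ReLU + MaxPool -> 256 x 6 x 6
    return feature, f2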