import glob
import json
import os
import shutil

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from progressbar import ProgressBar
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder

# Assumption: `se_resnext50_32x4d` is a local SENet variant (derived from
# Cadene's pretrainedmodels) whose forward returns (fc1 embedding, logits).
from senet import se_resnext50_32x4d


def run(json_pth, save_pth, img_pth, check_dr):
    # Recover the index -> class-name mapping saved by the trainer.
    js = json.loads(open(json_pth).read())
    lbses = js['label_ix']
    new_lbs = {v: k for k, v in lbses.items()}

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.fc1 = nn.Linear(2048, 2)
    model.fc2 = nn.Linear(2, len(new_lbs))
    model.load_state_dict(torch.load(check_dr), strict=False)
    model.cuda()
    model.eval()

    for ip in glob.glob(os.path.join(img_pth, '*.*')):
        imag = Image.open(ip)
        ig = trans(imag).unsqueeze(0).cuda()  # `trans` is sketched below
        with torch.no_grad():
            fc1, output = model(ig)
        output = F.softmax(output, dim=1)
        # (The original averaged scores over a single-iteration loop and
        # divided by 1; reduced to one forward pass with identical results.)
        score = output.squeeze(dim=0).cpu().numpy()
        pred_lb = np.argmax(score)
        sc = np.max(score)
        pred_label = new_lbs[pred_lb]
        # Sort the image into a sub-directory named after the predicted class.
        name = ip.replace('\\', '/').split('/')[-1]
        new_path = os.path.join(save_pth, pred_label)
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        shutil.copy(ip, os.path.join(new_path, name))
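The inference loop above calls a module-level `trans` that never appears in this excerpt (only its `Normalize` tail survives further down), and the trainers below index a `data_transforms` dict. A minimal sketch of both, assuming a plain resize plus the ImageNet normalization that the surviving fragment confirms; the 512x512 size (borrowed from the `resize_image_fixed_size` call in the next script) and the train-time augmentation are assumptions.

```python
from torchvision import transforms

# Sketch of the missing eval-time transform. The Normalize statistics appear
# verbatim later in this excerpt; the Resize size is an assumption.
trans = transforms.Compose([
    transforms.Resize((512, 512)),  # assumed input size
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Sketch of the shared train/val transform dict the trainers index.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((512, 512)),      # assumed input size
        transforms.RandomHorizontalFlip(),  # assumed augmentation
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': trans,
}
```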
def run(json_pth, save_pth, image_pth, check_dr):
    js = json.loads(open(json_pth).read())
    lbses = js['label_ix']
    new_lbs = {v: k for k, v in lbses.items()}
    print(new_lbs)

    base_dr = image_pth
    # base_dr = 'D:/deep_learn_data/luntai/crop/crop'
    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.fc1 = nn.Linear(2048, 2)
    model.fc2 = nn.Linear(2, len(new_lbs))
    model.load_state_dict(torch.load(check_dr), strict=False)
    model.cuda()
    model.eval()

    for k1 in glob.glob(os.path.join(base_dr, '*.*')):
        imag = Image.open(k1)
        # Letterbox to 512x512 before the tensor transform;
        # resize_image_fixed_size is an external helper (sketched below).
        ig = np.asarray(imag)
        img, _, _, _, _ = resize_image_fixed_size(ig, [512, 512])
        ig = trans(Image.fromarray(img)).unsqueeze(0).cuda()
        with torch.no_grad():
            fc1, output = model(ig)
        output = F.softmax(output, dim=1)
        pred = output.squeeze(dim=0).cpu().numpy()
        pred_lb = np.argmax(pred)
        sc = np.max(pred)
        pred_label = new_lbs[pred_lb]
        # The original read `k.replace(...)` here, a NameError: the loop
        # variable is `k1`.
        name = k1.replace('\\', '/').split('/')[-1]
        new_path = os.path.join(save_pth, pred_label)
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        shutil.copy(k1, os.path.join(new_path, name))
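`resize_image_fixed_size` is another helper missing from the excerpt. A plausible sketch under the assumption that it letterboxes the image to the target size; only the first return value is consumed above, so the placement-metadata extras (window, scale, padding, crop) are guesses chosen to match the five-value unpacking.

```python
import numpy as np
from PIL import Image


def resize_image_fixed_size(image, target_size):
    """Sketch: letterbox `image` (H, W[, C] uint8) to `target_size` = [h, w].

    Returns (padded_image, window, scale, padding, crop); only the image is
    used by the caller above, the remaining values are assumptions.
    """
    th, tw = target_size
    h, w = image.shape[:2]
    scale = min(th / h, tw / w)
    nh, nw = int(round(h * scale)), int(round(w * scale))
    resized = np.asarray(Image.fromarray(image).resize((nw, nh)))
    top, left = (th - nh) // 2, (tw - nw) // 2
    padded = np.zeros((th, tw) + image.shape[2:], dtype=image.dtype)
    padded[top:top + nh, left:left + nw] = resized
    window = (top, left, top + nh, left + nw)
    padding = ((top, th - nh - top), (left, tw - nw - left), (0, 0))
    return padded, window, scale, padding, None
```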
def run(trainr, test_dr, name, cls_num, idx):
    batch_size = 2
    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    test_data = ImageFolder(test_dr, transform=data_transforms['val'])
    data_loader = DataLoader(imagenet_data, batch_size=batch_size, shuffle=True)
    test_data_loader = DataLoader(test_data, batch_size=1, shuffle=True)

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(
        torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
        strict=False)
    # Two-stage head: 2048 -> 2-D embedding (for center loss / plotting) -> logits.
    model.fc1 = nn.Linear(2048, 2)
    model.fc2 = nn.Linear(2, cls_num)
    # model.load_state_dict(torch.load('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/1009_res_total.pth'), strict=False)
    model.cuda()

    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'], nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name

    centerloss = CenterLoss(cls_num, 2)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        ip1_loader = []
        idx_loader = []
        correct = 0
        for (data, target) in progress(data_loader):
            # Skip the trailing partial batch. (A stray no-op
            # `data.detach().numpy()` statement was removed here.)
            if data.size(0) != batch_size:
                break
            data, target = data.cuda(), target.cuda()
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            optimzer_center.zero_grad()
            # Center loss is weighted 0.0 here, i.e. effectively disabled.
            loss = focal_loss(output, target) + centerloss(target, f1) * 0.0
            loss.backward()
            optimizer.step()
            optimzer_center.step()
            ip1_loader.append(f1)
            idx_loader.append(target)
            # Exponential moving average of the loss.
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
        print(correct, ll, loss_avg)
        state['train_accuracy'] = correct / len(data_loader.dataset)
        # Scatter-plot the 2-D embeddings collected over the epoch.
        feat = torch.cat(ip1_loader, 0)
        labels = torch.cat(idx_loader, 0)
        visualize(feat.data.cpu().numpy(), labels.data.cpu().numpy(), epoch, cls_num)
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for batch_idx, (data, target) in enumerate(test_data_loader):
                data, target = data.cuda(), target.cuda()
                f1, output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
            state['test_loss'] = loss_avg / len(test_data_loader)
            state['test_accuracy'] = correct / len(test_data_loader.dataset)
        print(state['test_accuracy'])

    for epoch in range(30):
        state['epoch'] = epoch
        train()
        # test()
        sch.step(state['train_accuracy'])
        if state['best_accuracy'] < state['train_accuracy']:
            state['best_accuracy'] = state['train_accuracy']
            torch.save(model.state_dict(),
                       os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                                    idx + '.pth'))
            with open(os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                                   idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        # The original compared a stale `best_accuracy` local that was never
        # updated, so this early exit could not fire; check the tracked value.
        if state['best_accuracy'] == 1.0:
            break
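The trainers depend on three helpers that are not part of the excerpt: `FocalLoss`, `CenterLoss`, and `visualize`. Minimal sketches follow, assuming the standard formulations (Lin et al.'s focal loss; Wen et al.'s center loss, matching the `(labels, features)` call order used above) and a plain matplotlib scatter of the 2-D embeddings; the output filename in `visualize` is a hypothetical choice.

```python
import matplotlib
matplotlib.use('Agg')  # headless plotting
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    """Multi-class focal loss: FL = (1 - p_t)^gamma * CE (Lin et al., 2017)."""

    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logits, target):
        ce = F.cross_entropy(logits, target, reduction='none')
        pt = torch.exp(-ce)  # probability assigned to the true class
        return ((1 - pt) ** self.gamma * ce).mean()


class CenterLoss(nn.Module):
    """Center loss (Wen et al., 2016); note the (labels, features) call order."""

    def __init__(self, num_classes, feat_dim):
        super().__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, labels, features):
        # Pull each embedding toward its class center.
        return ((features - self.centers[labels]) ** 2).sum(dim=1).mean()


def visualize(feat, labels, epoch, cls_num):
    """Scatter the 2-D embeddings, one color per class, and save the figure."""
    plt.figure(figsize=(6, 6))
    for c in range(cls_num):
        plt.scatter(feat[labels == c, 0], feat[labels == c, 1], s=4, label=str(c))
    plt.legend(loc='best')
    plt.savefig('feat_epoch_%d.png' % epoch)  # assumed output location
    plt.close()
```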
def run(trainr, testdr, name, cls_num, idx):
    batch_size = 8
    data_loader, imagenet_data, new_lbs = load_dat(batch_size, trainr)

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
                          strict=False)
    # 4-D embedding head this time; the center-loss dimension below matches.
    model.fc1 = nn.Linear(2048, 4)
    model.fc2 = nn.Linear(4, cls_num)
    # model.load_state_dict(torch.load('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/1009_res_total.pth'), strict=False)
    model.cuda()

    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'], nesterov=True)
    state['label_ix'] = imagenet_data.class_to_idx
    state['cls_name'] = name

    centerloss = CenterLoss(cls_num, 4)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
    ll = len(data_loader.dataset)
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()
    state['train_accuracy'] = 0

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        correct = 0
        for (data, target) in progress(data_loader):
            if data.size(0) != batch_size:
                break
            data, target = data.cuda(), target.cuda()
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            optimzer_center.zero_grad()
            loss = focal_loss(output, target) + centerloss(target, f1) * 0.3
            loss.backward()
            optimizer.step()
            optimzer_center.step()
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
        print(correct, ll, loss_avg)
        state['train_accuracy'] = correct / len(data_loader.dataset)
        state['train_loss'] = loss_avg

    def test():
        # Pseudo-labelling pass: confident predictions are copied into the
        # training set; low-confidence images are evicted if previously copied.
        with torch.no_grad():
            model.eval()
            for k in glob.glob(os.path.join(testdr, '*.jpg')):
                imag = Image.open(k)
                ig = data_transforms['val'](imag).unsqueeze(0).cuda()
                f1, output = model(ig)
                output = F.softmax(output, dim=1)
                pred = output.squeeze(dim=0).cpu().numpy()
                # (The original summed `pred` over axis 0, collapsing the
                # class vector to a scalar so argmax was always 0; argmax/max
                # must run on the probability vector itself.)
                pred_lb = np.argmax(pred)
                sc = np.max(pred)
                print(k)
                lbs = new_lbs[pred_lb]
                if sc > 0.66:
                    # `train_dr` in the original was undefined; the training
                    # directory is the `trainr` parameter.
                    shutil.copy(k, os.path.join(trainr, lbs))
                else:
                    try:
                        nn_name = k.split('/')[-1]
                        os.remove(os.path.join(trainr, lbs, nn_name))
                    except OSError:
                        pass

    for epoch in range(100):
        state['epoch'] = epoch
        train()
        test()
        # Reload the training set so newly pseudo-labelled images are picked up.
        data_loader, imagenet_data, new_lbs = load_dat(batch_size, trainr)
        sch.step(state['train_accuracy'])
        # The original compared a stale `best_accuracy` local (always 0), so a
        # checkpoint was written every epoch; compare the tracked value instead.
        if state['best_accuracy'] < state['train_accuracy']:
            state['best_accuracy'] = state['train_accuracy']
            torch.save(model.state_dict(),
                       os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                                    idx + '.pth'))
            with open(os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check',
                                   idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        if state['best_accuracy'] == 1.0:
            break
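`load_dat` is referenced above but not defined in the excerpt. A minimal sketch, assuming it mirrors the `ImageFolder` setup of the previous trainer and additionally returns the inverse index -> class-name map that `test()` needs for pseudo-labelling; `data_transforms` is the shared transform dict sketched earlier.

```python
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder


def load_dat(batch_size, trainr):
    """Sketch: build the training loader plus an index -> class-name map."""
    imagenet_data = ImageFolder(trainr, transform=data_transforms['train'])
    data_loader = DataLoader(imagenet_data, batch_size=batch_size, shuffle=True)
    # Invert class_to_idx so predictions map back to folder (class) names.
    new_lbs = {v: k for k, v in imagenet_data.class_to_idx.items()}
    return data_loader, imagenet_data, new_lbs
```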
trans = transforms.Compose([
    # (The head of this Compose is truncated in the source; ToTensor has to
    # precede Normalize, the earlier transforms are unknown.)
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

js = json.loads(
    open('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/round2.json').read())
lbs = js['label_ix']
new_lbs = {v: k for k, v in lbs.items()}
print(new_lbs)

pred_path = '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/testb_round/step2'
base_dr = '/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/guangdong_round2_test_b_20181106'
# base_dr = 'D:/deep_learn_data/luntai/crop/crop'

model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
model.fc1 = nn.Linear(2048, 2)
model.fc2 = nn.Linear(2, 2)
model.load_state_dict(
    torch.load('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/round2.pth'),
    strict=False)
model.cuda()
model.eval()

for k in glob.glob(os.path.join(base_dr, '*.*')):
    imag = Image.open(k)
    ig = trans(imag).unsqueeze(0).cuda()
    with torch.no_grad():
        fc1, output = model(ig)
    # The source breaks off after the forward pass; the post-processing below
    # is reconstructed from the identical inference loop in run() above.
    output = F.softmax(output, dim=1)
    pred = output.squeeze(dim=0).cpu().numpy()
    pred_label = new_lbs[int(np.argmax(pred))]
    name = k.replace('\\', '/').split('/')[-1]
    new_path = os.path.join(pred_path, pred_label)
    if not os.path.exists(new_path):
        os.makedirs(new_path)
    shutil.copy(k, os.path.join(new_path, name))
def run(train_sets, valid_sets, cls_num, idx):
    batch_size = 6
    train_gen = get_batch(batch_size=batch_size, data_set=train_sets,
                          image_size=train_sets.image_size)
    valid_gen = get_batch(batch_size=1, data_set=valid_sets,
                          image_size=train_sets.image_size)

    model = se_resnext50_32x4d(num_classes=1000, pretrained=None)
    model.load_state_dict(torch.load('/home/dsl/all_check/se_resnext50_32x4d-a260b3a4.pth'),
                          strict=False)
    model.fc1 = nn.Linear(2048, 2)
    model.fc2 = nn.Linear(2, cls_num)
    # model.load_state_dict(torch.load('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/dsl/check/1009_res_total.pth'), strict=False)
    model.cuda()

    state = {'learning_rate': 0.01, 'momentum': 0.9, 'decay': 0.0005}
    optimizer = torch.optim.SGD(model.parameters(), state['learning_rate'],
                                momentum=state['momentum'],
                                weight_decay=state['decay'], nesterov=True)
    state['label_ix'] = train_sets.cls_map
    state['cls_name'] = idx

    centerloss = CenterLoss(cls_num, 2)
    centerloss.cuda()
    optimzer_center = torch.optim.SGD(centerloss.parameters(), lr=0.3)
    state['best_accuracy'] = 0
    sch = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5)
    ll = train_sets.len()
    focal_loss = FocalLoss(gamma=2)
    focal_loss.cuda()

    def train():
        model.train()
        loss_avg = 0.0
        progress = ProgressBar()
        correct = 0
        # (The original collected f1/target into ip1_loader/idx_loader lists
        # that were never consumed here, pinning GPU tensors for the whole
        # epoch; the dead accumulators were dropped.)
        for b in progress(range(int(train_sets.len() / batch_size))):
            images, labels = next(train_gen)
            # Generator yields NHWC float arrays; PyTorch wants NCHW tensors.
            images = torch.from_numpy(np.transpose(images, [0, 3, 1, 2]))
            labels = torch.from_numpy(labels).long()
            data, target = images.cuda(), labels.cuda()
            f1, output = model(data)
            pred = output.data.max(1)[1]
            correct += float(pred.eq(target.data).sum())
            optimizer.zero_grad()
            optimzer_center.zero_grad()
            loss = focal_loss(output, target) + centerloss(target, f1) * 0.3
            loss.backward()
            optimizer.step()
            optimzer_center.step()
            loss_avg = loss_avg * 0.2 + float(loss) * 0.8
        print(correct, ll, loss_avg)
        state['train_accuracy'] = correct / train_sets.len()
        state['train_loss'] = loss_avg

    def test():
        with torch.no_grad():
            model.eval()
            loss_avg = 0.0
            correct = 0
            for i in range(valid_sets.len()):
                images, labels = next(valid_gen)
                images = torch.from_numpy(np.transpose(images, [0, 3, 1, 2]))
                labels = torch.from_numpy(labels).long()
                data, target = images.cuda(), labels.cuda()
                f1, output = model(data)
                loss = F.cross_entropy(output, target)
                pred = output.data.max(1)[1]
                correct += float(pred.eq(target.data).sum())
                loss_avg += float(loss)
            state['test_loss'] = loss_avg / valid_sets.len()
            state['test_accuracy'] = correct / valid_sets.len()
        print(state['test_accuracy'])

    for epoch in range(40):
        state['epoch'] = epoch
        train()
        test()
        sch.step(state['train_accuracy'])
        # Score each epoch by the mean of train and test accuracy.
        best_accuracy = (state['train_accuracy'] + state['test_accuracy']) / 2
        if best_accuracy > state['best_accuracy']:
            state['best_accuracy'] = best_accuracy
            torch.save(model.state_dict(),
                       os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/AIChallenger2018/zuixin/log',
                                    idx + '.pth'))
            with open(os.path.join('/media/dsl/20d6b919-92e1-4489-b2be-a092290668e4/AIChallenger2018/zuixin/log',
                                   idx + '.json'), 'w') as f:
                f.write(json.dumps(state))
                f.flush()
        print(state)
        print("Best accuracy: %f" % state['best_accuracy'])
        # Early-stop once the train/test gap suggests overfitting.
        if state['train_accuracy'] - state['test_accuracy'] > 0.06 and epoch > 30:
            break
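`get_batch` and the `train_sets`/`valid_sets` objects live outside this excerpt. A sketch of the generator contract the trainer assumes: an endless stream of `(images, labels)` pairs with images as NHWC float arrays; the `data_set.len()` / `data_set.load(i)` API is a hypothetical stand-in for the real dataset interface.

```python
import numpy as np


def get_batch(batch_size, data_set, image_size):
    """Sketch of the assumed generator contract: loop over `data_set`
    forever, yielding (images NHWC float32, labels int) batches.

    `data_set.len()` and `data_set.load(i) -> (image, label)` are hypothetical
    stand-ins; `image_size` is accepted for signature parity with the callers.
    """
    order = np.arange(data_set.len())
    while True:
        np.random.shuffle(order)
        for start in range(0, len(order) - batch_size + 1, batch_size):
            images, labels = [], []
            for i in order[start:start + batch_size]:
                img, lb = data_set.load(i)  # hypothetical loader call
                images.append(img)
                labels.append(lb)
            yield np.stack(images).astype(np.float32), np.asarray(labels)
```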