Example #1
def train():
    try:
        # check environment
        env.config_env()

        # check dataset
        dt = dataset.dataset()
        dt.check()
        dt.generate_dataflow(image_size=config.configured_image_size)

        dt.debug()  # for debugging the dataset

        # instantiate and initialize the model
        model = model_manager.get_model_instance(dataset=dt)
        model.define()
        model.generate()
        model.train()
        
    except Exception as e:
        print(e)
        traceback.print_exc()
    finally:
        print('over')
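The snippet defines train() but never calls it; a conventional entry point (an assumption, not shown in this example) would be:

if __name__ == '__main__':
    train()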
Example #2
        # ExpandBorder(size=(368,368),resize=True),
        RandomResizedCrop(size=(200, 200)),
        # RandomResizedCrop(size=(336, 336)),
        RandomHflip(),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': Compose([
        # ExpandBorder(size=(336,336),resize=True),
        ExpandBorder(size=(200,200),resize=True),
        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_set = {}
data_set['train'] = dataset(imgroot=rawdata_root, anno_pd=train_pd, encoder=encoder,
                            transforms=data_transforms["train"])
data_set['val'] = dataset(imgroot=rawdata_root, anno_pd=val_pd, encoder=encoder,
                          transforms=data_transforms["val"])
dataloader = {}
dataloader['train'] = torch.utils.data.DataLoader(data_set['train'], batch_size=16,
                                                  shuffle=True, num_workers=8, collate_fn=collate_fn)
dataloader['val'] = torch.utils.data.DataLoader(data_set['val'], batch_size=16,
                                                shuffle=True, num_workers=8, collate_fn=collate_fn)

'''model'''

model = se_resnet101_xuelang(num_classes=160)
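The snippet stops after constructing the model. A minimal training-loop sketch that could follow, assuming collate_fn yields (inputs, labels) tensor batches with integer class labels; the criterion and optimizer choices are illustrative assumptions, not taken from the source:

import torch
import torch.nn as nn
import torch.optim as optim

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
criterion = nn.CrossEntropyLoss()  # assumption: single-label classification over 160 classes
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

model.train()
for inputs, labels in dataloader['train']:  # one pass over the training split
    inputs, labels = inputs.to(device), labels.to(device)
    optimizer.zero_grad()
    loss = criterion(model(inputs), labels)
    loss.backward()
    optimizer.step()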
Example #3
                                    random_state=43,
                                    stratify=all_pd['label'])
true_test_pb = pd.read_csv("./datasets/test.txt",
                           sep=" ",
                           header=None,
                           names=['ImageName'])
"addFakeLabel"
true_test_pb['label'] = 1

test_pd = true_test_pb if mode == "test" else val_pd
print(test_pd.head())

data_set = {}
data_set['test'] = dataset(
    imgroot=os.path.join(rawdata_root, mode),
    anno_pd=test_pd,
    transforms=test_transforms,
)
data_loader = {}
data_loader['test'] = torchdata.DataLoader(data_set['test'],
                                           batch_size=4,
                                           num_workers=4,
                                           shuffle=False,
                                           pin_memory=True,
                                           collate_fn=collate_fn)

model_name = 'inceptionv4_nomal_label'
resume = './model/inceptionv4/weights-18-378-[0.9193].pth'

# model =resnet18(pretrained=True)
# model.avgpool = torch.nn.AdaptiveAvgPool2d(output_size=1)
Example #4
    def __init__(self, mode=config.configured_predict_modes[0]):
        self.__mode = mode
        self.__loaded_model = None

        self.__dataset = dataset.dataset()
Example #5
    from core.net_sgd_corrector import SGD
    from core.net_adam_corrector import Adam
    from core.net_transfer_functions import functions
    from dataset.dataset import dataset
    from dataset.normalizer import calculate_normalize, normalize_dataset

    net = Net(functions['tanh'], Adam)
    # net = Net(functions['tanh'], NAG, corrector_param={'mu': 0.97})
    # net = Net(functions['tanh'], SC)

    net.initialize_from(
        '/Users/nikon/PycharmProjects/laperseptron/data/iris.config.json',
        0.001)
    # net.load_from('/Users/nikon/PycharmProjects/laperseptron/data/iris.net.json')

    train = dataset(
        '/Users/nikon/PycharmProjects/laperseptron/data/iris.train.csv')
    test = dataset(
        '/Users/nikon/PycharmProjects/laperseptron/data/iris.test.csv')

    net.set_normalization(calculate_normalize(train))
    normalize_dataset(train, net)
    normalize_dataset(test, net)

    net.train(5, 0.1, train, test, 2)
    # net.train(10, 0.1, train, test, 2)
    # net.train(25, 0.1, train, test, 5)

    net.save_to('/Users/nikon/PycharmProjects/laperseptron/data/iris.net.json')

    print('\n%sTest Iris Setosa%s' % (Colors.OKGREEN if net.calculate(
        test[0][0]) == 'Iris Setosa' else Colors.FAIL, Colors.ENDC))
Example #6
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
# loss += 0.0005 * tf.reduce_sum([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.01
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step=global_step,
                                           decay_steps=1000,
                                           decay_rate=0.9)
add_global = global_step.assign_add(1)
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
prediction = tf.to_int64(tf.argmax(logits, 1))
correct_prediction = tf.equal(prediction, tf.argmax(y_, 1))
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Training and test data; n_epoch can be set larger.
train_data = dataset(rawdata_root, train_pd, image_size, classes)
x_train, y_train = train_data.images, train_data.labels
val_data = dataset(rawdata_root, val_pd, image_size, classes)
x_val, y_val = val_data.images, val_data.labels
n_epoch = 10
batch_size = 8
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# saver = tf.train.Saver()
for epoch in range(n_epoch):
    start_time = time.time()
    step = 0
    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train,
Example #7
AIM Lab
'''

import os
from other.parser import parse_input
from dataset.dataset import dataset
from preprocess.preprocess import MakeH5File
from train import Train
from test import Test

if __name__ == "__main__":

    arg = parse_input()

    # Download dataset
    dataset(arg)

    if arg.mode == 'train':

        # PreProcess
        root = os.path.dirname(os.path.realpath(__file__))
        h5 = MakeH5File(root, arg)
        h5.run()
        # Train
        train = Train(arg)
        train.run()

    if arg.mode == 'test':
        test = Test(arg)
        test.run()
Example #8
                save_path = os.path.join(save_dir, 'weights-%d-%d-[%.5f].pth' % (epoch, batch_cnt, loss.item()))
                torch.save(model.state_dict(), save_path)
                logging.info('saved model to %s' % (save_path))
                logging.info('--' * 30)


if __name__ == '__main__':

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    logfile = save_dir + '/trainlog.log'
    trainlog(logfile)

    '''data'''
    data_set = {}
    data_set['train'] = dataset(imgroot=rawdata_root, anno_pd=train_pd,
                                transforms=data_transforms["train"])
    dataloader = {}
    dataloader['train'] = torch.utils.data.DataLoader(data_set['train'], batch_size=4,
                                                      shuffle=True, num_workers=4, collate_fn=collate_fn)
    '''model'''
    # model = md.Modified_Densenet169(num_classs=100)
    model = md.Modified_Resnet152(num_classs=100)
    # model = md.Modified_SENet154(num_classs=100)

    resume = None
    if resume:
        logging.info('Resuming finetune from %s' % resume)
        model.load_state_dict(torch.load(resume))
    model = model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-5)
Example #9
    predicted[predicted >= 2] = 1
    predicted = predicted.view(-1)
    accuracy = torch.sum(predicted == labels).item() / labels.size()[0]
    return accuracy


BATCH_SIZE = 32
EPOCHS = 1
LEARNING_RATE = 0.001

np.random.seed(0)
torch.manual_seed(1)

cuda = torch.cuda.is_available()

train_set = dataset(train=True)
test_set = dataset(train=False)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set,
                                          batch_size=2 * BATCH_SIZE,
                                          shuffle=False)

model = net()
if cuda:
    model = model.cuda()
criterion = loss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
# optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
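With the model, criterion, optimizer, and loaders in place, a minimal sketch of the training loop they would feed, assuming the dataset yields (data, labels) tensor pairs (an assumption about this dataset class):

for epoch in range(EPOCHS):
    model.train()
    for data, labels in train_loader:
        if cuda:
            data, labels = data.cuda(), labels.cuda()
        optimizer.zero_grad()
        batch_loss = criterion(model(data), labels)
        batch_loss.backward()
        optimizer.step()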
Example #10
# PNASnet's transforms
# test_transforms = transforms.Compose([
#                 transforms.Resize(299),
#                 transforms.CenterCrop(299),
#                 transforms.ToTensor(),
#                 transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
#             ])

if __name__ == '__main__':

    test_pd = true_test_pb
    # print(test_pd.head())

    data_set = {}
    data_set['test'] = dataset(imgroot=rawdata_root, anno_pd=test_pd,
                               transforms=test_transforms)
    data_loader = {}
    data_loader['test'] = torchdata.DataLoader(data_set['test'], batch_size=4, num_workers=4,
                                               shuffle=False, pin_memory=True, collate_fn=collate_fn)

    # model = md.Modified_Densenet169(num_classs=100)
    model = md.Modified_Resnet152(num_classs=100)
    # model = md.Modified_SENet154(num_classs=100)

    print('Resuming finetune from %s' % resume)
    model.load_state_dict(torch.load(resume))
    model = model.cuda()
    model.eval()
    criterion = CrossEntropyLoss()

    if not os.path.exists('./output'):
Example #11
args = parser.parse_args()
log_dir = os.path.join(args.log_dir, args.model_name, args.patch_dataset, args.note)
ckpt_path = get_ckpt_path(log_dir)
print('load checkpoint file from', ckpt_path)
state_dict = torch.load(ckpt_path)

model = get_model(args)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

if 'model_state' in state_dict.keys():
    model.load_state_dict(state_dict['model_state'])
else:
    model.load_state_dict(state_dict)

val_loader = TD.DataLoader(dataset=dataset(args, split='test'),
                           batch_size=args.batch_size,
                           shuffle=True,
                           drop_last=True,
                           num_workers=2)

model.eval()
for idx, datas in enumerate(val_loader):
    imgs = datas['imgs'].to(device).float()
    points_offset = 32 * datas['points_offset'].data.cpu().numpy()
    points_perturb_true = datas['points_perturb'].data.cpu().numpy()
    img_path = datas['img_path']

    outputs = 32 * model.forward(imgs)

    show_offset(imgs, img_path, points_offset, points_perturb_true, outputs)
Example #12
rawdata_root = '/data/round2B/test'
true_test_pb = pd.read_csv("/data/round2B/semifinal_image_phase2/image.txt",
                           header=None,
                           names=['ImageName'])

print(true_test_pb.head(10))

true_test_pb['label'] = 'ZJL296'

test_pd = true_test_pb if mode == "test" else val_pd
print(test_pd.head())

data_set = {}
data_set['test'] = dataset(
    imgroot=rawdata_root,
    anno_pd=test_pd,
    encoder=test_encoder,
    transforms=test_transforms,
)
data_loader = {}
data_loader['test'] = torchdata.DataLoader(data_set['test'],
                                           batch_size=8,
                                           num_workers=4,
                                           shuffle=False,
                                           pin_memory=True,
                                           collate_fn=collate_fn)

# model_name = 'resnet50-out'
resume = None

model = se_resnet101_xuelang(num_classes=160)
# model =resnet50(pretrained=False)
Example #13
import pprint


if __name__ == '__main__':
    from core.net_interface import Net
    from core.net_standard_corrector import Standard
    from core.net_potential_corrector import Potential
    from core.net_distance_functions import distance_functions
    from dataset.dataset import dataset

    dataset = dataset('/Users/nikon/PycharmProjects/lakohonen/data/iris.train.csv')
    classes = [j for j in range(3) for i in range(50)]

    potential = Potential(nu=1, tau=3000, p_min=0.75)
    net = Net('Iris', distance_functions['manhattan'], potential)
    net.initialize(input=4, m=10, n=15, factor=1, negative=False)
    # net.load_from('/Users/nikon/PycharmProjects/lakohonen/data/iris.cluster.net.json')
    net.train(500, dataset, stop_error=10 ** -15, stop_delta=10 ** -15)

    path = '/Users/nikon/PycharmProjects/lakohonen/data'
    net.visualize_maps(dataset, 'avg', path)
    net.visualize_u_matrix(path)
    net.visualize_clusterization(dataset, cluster_number=3, classes=classes, path=path)

    net.save_to('/Users/nikon/PycharmProjects/lakohonen/data/iris.net.json')
Example #14
def DataTransform(cfg, rawdata_root, train_pd, test_pd):
    if True:
        data_transforms = {
            'swap': transforms.Compose([
                transforms.Resize((512,512)),
                transforms.RandomRotation(degrees=15),
                transforms.RandomCrop((448,448)),
                transforms.RandomHorizontalFlip(),
                transforms.Randomswap((cfg['swap_num'],cfg['swap_num'])),
            ]),
            'unswap': transforms.Compose([
                transforms.Resize((512,512)),
                transforms.RandomRotation(degrees=15),
                transforms.RandomCrop((448,448)),
                transforms.RandomHorizontalFlip(),
            ]),
            'totensor': transforms.Compose([
                transforms.Resize((448,448)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]),
            'None': transforms.Compose([
                transforms.Resize((512,512)),
                transforms.CenterCrop((448,448)),
            ]),
        }
        data_set = {}
        data_set['train'] = dataset(
                cfg,
                imgroot=rawdata_root,
                anno_pd=train_pd,
                unswap=data_transforms["unswap"],
                swap=data_transforms["swap"],
                totensor=data_transforms["totensor"],
                centercrop=data_transforms["None"],
                train=True
                )
        data_set['val'] = dataset(
                cfg,
                imgroot=rawdata_root,
                anno_pd=test_pd,
                unswap=data_transforms["unswap"],
                swap=data_transforms["swap"],
                totensor=data_transforms["totensor"],
                centercrop=data_transforms["None"],
                train=False
                )
        dataloader = {}
        dataloader['train']=torch.utils.data.DataLoader(
                data_set['train'],
                batch_size=cfg['batch_size'],
                shuffle=True,
                num_workers=cfg['batch_size'],
                # select the desired original:swap ratio for training (1:0, 0:1, 1:1, 1:2, 1:3)
                collate_fn=collate_fn_for_train_11
                )
        dataloader['val']=torch.utils.data.DataLoader(
                data_set['val'],
                batch_size=cfg['batch_size'],
                shuffle=True,
                num_workers=cfg['batch_size'],
                collate_fn=collate_fn_for_test
                )
    return data_set, dataloader
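A short usage sketch for DataTransform; the cfg values here are illustrative assumptions (the real ones come from the project's configuration):

cfg = {'swap_num': 7, 'batch_size': 16}  # hypothetical values
data_set, dataloader = DataTransform(cfg, rawdata_root, train_pd, test_pd)
print(len(data_set['train']), len(data_set['val']))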
Example #15
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

save_dir = 'resnet50'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
logfile = '%s/trainlog.log' % save_dir
trainlog(logfile)
data_set = {}
data_set['train'] = dataset(
    imgroot=datapath + "/train/",
    anno_pd=train_pd,
    transforms=data_transforms["train"],
)
data_set['val'] = dataset(
    imgroot=datapath + "/val/",
    anno_pd=val_pd,
    transforms=data_transforms["val"],
)
dataloader = {}
dataloader['train'] = torch.utils.data.DataLoader(data_set['train'],
                                                  batch_size=4,
                                                  shuffle=True,
                                                  num_workers=4,
                                                  collate_fn=collate_fn)
dataloader['val'] = torch.utils.data.DataLoader(data_set['val'],
                                                batch_size=4,
Example #16
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

save_dir = './BD_SEResNeXt101_32x4d/models/SEResNeXt101_32x4d'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
logfile = '%s/trainlog.log' % save_dir
trainlog(logfile)
data_set = {}
data_set['train'] = dataset(
    imgroot=os.path.join(rawdata_root, "train"),
    anno_pd=train_pd,
    transforms=data_transforms["train"],
)
data_set['val'] = dataset(
    imgroot=os.path.join(rawdata_root, "train"),
    anno_pd=val_pd,
    transforms=data_transforms["val"],
)
dataloader = {}
dataloader['train'] = torch.utils.data.DataLoader(data_set['train'],
                                                  batch_size=8,
                                                  shuffle=True,
                                                  num_workers=4,
                                                  collate_fn=collate_fn)
dataloader['val'] = torch.utils.data.DataLoader(data_set['val'],
                                                batch_size=4,