Example #1
def run():
    print("CUDA is available: {}".format(torch.cuda.is_available()))
    data_transform = transforms.Compose(
        [Rescale(250), CenterCrop(224),
         Normalize(), ToTensor()])

    # the loader splits the dataset into batches of size batch_size
    train_loader = initialize_train_loader(data_transform)
    test_loader = initialize_test_loader(data_transform)

    model_id = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
    # instantiate the neural network
    net = Net()
    net.to(device=device)
    summary(net, (1, 224, 224))
    # define the loss function using SmoothL1Loss
    criterion = nn.SmoothL1Loss()
    # define the params updating function using Adam
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    loss_logger = []

    for i in range(1, epochs + 1):
        model_name = 'model-{}-epoch-{}.pt'.format(model_id, i)

        # train all data for one epoch
        train(net, criterion, optimizer, i, train_loader, model_id,
              loss_logger)

        # evaluate the accuracy after each epoch
        evaluate(net, criterion, i, test_loader)

        # save model after every 5 epochs
        # https://discuss.pytorch.org/t/loading-a-saved-model-for-continue-training/17244/3
        # https://github.com/pytorch/pytorch/issues/2830
        # https://pytorch.org/tutorials/beginner/saving_loading_models.html
        if i % 5 == 0:
            torch.save(
                {
                    'epoch': i,
                    'model': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'loss_logger': loss_logger,
                }, model_dir + model_name)

    print("Finished training!")
    model_name = 'model-{}-final.pt'.format(model_id)
    torch.save(
        {
            'epoch': epochs,
            'model': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'loss_logger': loss_logger,
        }, model_dir + model_name)
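# A minimal resume sketch for the checkpoints saved above (not part of the
# original script; it assumes the same dict layout and file naming):
#     checkpoint = torch.load(model_dir + model_name)
#     net = Net()
#     net.load_state_dict(checkpoint['model'])
#     net.to(device=device)
#     optimizer = optim.Adam(net.parameters(), lr=0.001)
#     optimizer.load_state_dict(checkpoint['optimizer'])
#     start_epoch = checkpoint['epoch'] + 1
#     loss_logger = checkpoint['loss_logger']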
Example #2
def create_model(args):
    model = Net()
    if args.start_epoch:
        path = os.path.join(args.checkpoints_dir,
                            f'checkpoint-{args.start_epoch}.pth')
        model.load_state_dict(torch.load(path))
    else:
        model.load_pretrained(args.ddr_weights)
    return model.to(args.device)
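# Sketch: torch.load restores tensors to the device they were saved on, so a
# GPU checkpoint can fail on a CPU-only machine; passing map_location makes
# the checkpoint branch above device-safe (assuming args.device is a valid
# torch device):
#     model.load_state_dict(torch.load(path, map_location=args.device))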
Example #3
def main(args):
    epochs = args.epochs
    batch_size = args.batch_size
    val_frac = args.val_frac
    lr = args.lr
    momentum = args.momentum
    total_it = args.total_it

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    dataset_train = HomographyDataset(val_frac=val_frac, mode='train')
    dataset_eval = HomographyDataset(val_frac=val_frac, mode='eval')

    dataloader_train = DataLoader(dataset_train,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=4)

    dataloader_eval = DataLoader(dataset_eval,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=2)

    net = Net()
    net.to(device)
    criterion = nn.MSELoss()
    optimizer = optim.SGD(net.parameters(), momentum=momentum, lr=lr)

    log_every = len(dataloader_train) // 2

    n_it, lr_it = 0, 0
    while n_it < total_it:
        train(dataloader_train, device, net, criterion, optimizer)
        n_it += len(dataloader_train)
        lr_it += len(dataloader_train)  # count iterations since the last decay
        test(dataloader_eval, device, net, criterion, n_it)

        # decay the learning rate by 10x roughly every 30000 iterations
        if lr_it >= 30000:
            for group in optimizer.param_groups:
                group['lr'] /= 10.0
            lr_it = 0
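# A sketch of the same schedule using a built-in scheduler instead of mutating
# the optimizer state by hand (assumes the optimizer above; StepLR decays on
# every scheduler.step() call, so call it once per ~30000 iterations):
#     scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
#     scheduler.step()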
Example #4
def main():
    args = get_args()
    print(args)

    # set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # device = 'cpu'

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    net = Net(device=device, mode=args.target, target_mode=args.target_mode)
    net = net.eval()
    net = net.to(device)
    load_model(
        net,
        device,
        fullpath='trained_models/Net_continuous_trained/checkpoint_274.pth.tar'
    )

    imgs_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Real-Images/png'
    imgs_filelist = [
        os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)
        if img.endswith('.png')
    ]

    for i, img in enumerate(imgs_filelist):
        # x,x_paths, y, y_paths = data
        x = plt.imread(img)
        x = np.expand_dims(x, 0)
        x = np.transpose(x, (0, 3, 1, 2))
        x = x[:, :, 2:-2, 8:-8]
        x = torch.Tensor(x).to(device)
        with torch.no_grad():
            out = net(x)
        out = out.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        plt.figure(1)
        if args.target_mode == 'discrete':
            out = np.argmax(out, axis=1)
            out = out[0]
        # out = np.squeeze(out,0)
        out = (out - np.min(out)) / (np.max(out) - np.min(out))
        ax1 = plt.subplot(1, 3, 1)
        # x = (x + 1) / 2
        ax1.imshow(np.transpose(x[0], (1, 2, 0)))
        ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1)
        ax3.imshow(out, cmap='jet')
        # plt.suptitle(label, fontsize="large")
        plt.show()
Example #5
def get_model(args):
    ''' define model '''
    model = None
    if args.model == 'Net':
        model = Net()
    elif args.model == 'FCNet':
        model = FCNet()
    elif args.model == 'ConvNet':
        model = ConvNet()
    else:
        raise ValueError('The model is not defined!!')

    print('---Model Information---')
    print('Net:', model)
    print('Use GPU:', args.use_cuda)
    return model.to(args.device)
Example #6
def main(self):
    num_of_neurons = 151
    num_of_hidden_layers = 2
    learning_rate = 0.001
    epochs = 10
    args = {
        'num_of_neurons': num_of_neurons,
        'num_of_hidden_layers': num_of_hidden_layers,
        'learning_rate': learning_rate,
        'epochs': epochs,
    }
    loss = self.objective(args)
    print(loss)
    net = Net(num_of_neurones=num_of_neurons,
              num_of_hidden_layers=num_of_hidden_layers,
              num_of_inputs=self.hw_model.inputs,
              num_of_outputs=self.hw_model.outputs)
    net = net.to(self.device)
    print(self.test(net))
Example #7
class TrainBigramNN(tune.Trainable):
    def _setup(self, config):
        print("Loading word vectors...")
        word2index, word_vecs = process_word_vecs(FAST_TEXT)
        # Note that the word embeddings are normalized.
        self.wv = WV(F.normalize(word_vecs), word2index)
        # wv = WV(word_vecs, word2index)
        print("Done.")
        self.corpus_size = config["corpus_size"]
        bigram_fn_name = "diff"
        out_bigram_dim = 300
        dist_fn_name = "cos_dist"
        loss_fn_name = "mrl"
        margin = config["margin"]
        self.lr = config["lr"]
        self.num_epochs = config["num_epochs"]
        self.batch_size = config["batch_size"]
        self.test_model = True
        self.test_freq = config["test_freq"]
        with open(PROCESSED / "train.{}.pkl".format(str(self.corpus_size)), "rb") as f:
            wiki_train = pickle.load(f)
        with open(PROCESSED / "valid.pkl", "rb") as f:
            wiki_valid = pickle.load(f)
        wiki_combined = wiki_train + wiki_valid
        self.corpus = Corpus("wiki", wiki_combined, self.wv, filter_stopwords=True)
        self.model = Net(
            self.wv.vecs.size(1), BigramEncoder(bigram_fn_name), out_bigram_dim
        )
        self.model.to(device)
        self.dist_fn = DistanceFunction(dist_fn_name)
        self.loss_fn = LossFunction(loss_fn_name, margin=margin)
        self.device = device
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        torch.manual_seed(config["seed"])
        print("Traninig on Wikipedia corpus of size {}".format(self.corpus_size))

    def _train(self):
        result = train(
            self.wv,
            self.corpus.ix_sents[: self.corpus_size],
            self.corpus.sent_lengths[: self.corpus_size],
            self.corpus.ix_sents[self.corpus_size :],
            self.corpus.sent_lengths[self.corpus_size :],
            self.model,
            self.wv.vecs,
            self.dist_fn,
            self.loss_fn,
            self.optimizer,
            self.lr,
            self.num_epochs,
            self.batch_size,
            self._iteration,
            self.test_model,
            self.test_freq,
            self.device,
        )
        return result

    def _save(self, tmp_checkpoint_dir):
        checkpoint_path = str(Path(tmp_checkpoint_dir) / "model.pth")
        torch.save(self.model.state_dict(), checkpoint_path)
        return checkpoint_path

    def _restore(self, tmp_checkpoint_dir):
        checkpoint_path = str(Path(tmp_checkpoint_dir) / "model.pth")
        self.model.load_state_dict(torch.load(checkpoint_path))
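# A hedged usage sketch for the Trainable above (legacy ray.tune API with
# _setup/_train/_save/_restore); the config keys mirror those read in _setup,
# and the values are illustrative, not from the original experiment:
#     from ray import tune
#     tune.run(TrainBigramNN,
#              config={"corpus_size": 100000, "margin": 0.4, "lr": 1e-3,
#                      "num_epochs": 10, "batch_size": 128, "test_freq": 1,
#                      "seed": 0},
#              stop={"training_iteration": 10})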
Example #8
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = Net(50, 50, 50)
#criterion = nn.MSELoss()
criterion = nn.L1Loss(reduction='mean')
#criterion = nn.KLDivLoss(reduction='batchmean')
#criterion = nn.HingeEmbeddingLoss(reduction='sum')
#criterion = nn.CosineEmbeddingLoss()
#criterion = myloss.HingeLoss()
lr = 0.01
momentum = 0.9
#optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
#optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-4)

model.to(device)
losses_tr = []
losses_ts = []

num_epoches = 100
# training

for epoch in range(num_epoches):
    print(f'epoch {epoch}, training....')
    loss_tr = 0
    acc_tr = 0
    if epoch % 5 == 0:
        # every 5 epochs, decay the learning rate
        optimizer.param_groups[0]['lr'] *= 0.9

    optimizer.zero_grad()
Example #9
for k, v in weights.items():
    k = k.split('.')[1:]
    k = ".".join(k)
    state_dict.update({k: v})
# import pdb; pdb.set_trace()
refine_net.load_state_dict(state_dict)
vgg = VGG16(requires_grad=False)
vgg.to(device)

if torch.cuda.device_count() > 1 and MULTI_GPU:
    print("Using {} GPUs...".format(torch.cuda.device_count()))
    refine_net = nn.DataParallel(refine_net)
else:
    print("GPU ID: {}".format(device))

refine_net = refine_net.to(device)

d_loss_fn = nn.BCELoss()
d_loss_fn = d_loss_fn.to(device)
refine_loss_fn = CustomLoss()
refine_loss_fn = refine_loss_fn.to(device)

from dataset_cloth import define_dataset
tfrecord_path = "/content/uplara_tops_v10_refined_grapy.record"
batch_size = 1
trainset, trainset_length = define_dataset(tfrecord_path,
                                           batch_size,
                                           train=True)
valset, valset_length = define_dataset(tfrecord_path, batch_size, train=False)

tps_weights_path = "gs://experiments_logs/gmm/TOPS/short_sleeves_high_slope_loss/weights/model_44"
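# The key-rewriting loop above drops the first component of every state-dict
# key (e.g. 'module.conv1.weight' -> 'conv1.weight'), the usual fix for a
# checkpoint saved from an nn.DataParallel wrapper; an equivalent sketch:
#     state_dict = {k.split('.', 1)[1]: v for k, v in weights.items()}
#     refine_net.load_state_dict(state_dict)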
Example #10
print()

# Additional info when using CUDA
if device.type == 'cuda':
    print(torch.cuda.get_device_name(0))
    print('Memory Usage:')
    print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1),
          'GB')
    print('Cached:   ', round(torch.cuda.memory_reserved(0) / 1024**3, 1),
          'GB')  # memory_cached was renamed to memory_reserved in newer PyTorch
## TODO: Once you've defined the network, you can instantiate it
# one example conv layer has been provided for you
from models import Net

net = Net()
print(net)
net = net.to(device)

# ## Transform the dataset
#
# To prepare for training, create a transformed dataset of images and keypoints.
#
# ### TODO: Define a data transform
#
# In PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so your model's loss does not blow up during training, it is also suggested that you normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and you **do not** need to modify these; take a look at this file (you'll see the same transforms that were defined and applied in Notebook 1).
#
# To define the data transform below, use a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of:
# 1. Rescaling and/or cropping the data, such that you are left with a square image (the suggested size is 224x224px)
# 2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1]
# 3. Turning these images and keypoints into Tensors
#
# These transformations have been defined in `data_load.py`, but it's up to you to call them and create a `data_transform` below. **This transform will be applied to the training data and, later, the test data**. It will change how you go about displaying these images and keypoints, but these steps are essential for efficient training.
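# One possible `data_transform` (a sketch, assuming `transforms` and the
# `data_load.py` classes Rescale, CenterCrop, Normalize, and ToTensor are in
# scope, mirroring the composition used in Example #1):
data_transform = transforms.Compose([Rescale(250),
                                     CenterCrop(224),
                                     Normalize(),
                                     ToTensor()])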
Example #11
def main():
    args = get_args()
    print(args)

    # set device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    test_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Sintel/Filtered/rgb'
    label_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Sintel/Filtered/GT'

    test_filelist = [
        os.path.join(test_dir, img) for img in os.listdir(test_dir)
        if 'alley_1' in img
    ]
    test_labels_filelist = [
        img.replace(test_dir, label_dir).replace('_1100_maskImg.png',
                                                 '_GT.dpt')
        for img in test_filelist
    ]

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    test_dataset = Dataset(image_filelist=test_filelist,
                           label_filelist=test_labels_filelist,
                           transforms=transform,
                           pickle_name='test.pickle',
                           train=False)

    test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                   batch_size=1,
                                                   shuffle=True,
                                                   num_workers=8)

    net = Net(device=device, mode=args.target, target_mode=args.target_mode)
    net = net.eval()
    net = net.to(device)
    load_model(
        net,
        device,
        fullpath=
        '/home/yotamg/PycharmProjects/dfd/trained_models/Net_default/checkpoint_76.pth.tar'
    )

    for i, data in enumerate(test_data_loader):
        # x,x_paths, y, y_paths = data
        x, x_path, y, y_path = data
        x = x.to(device)
        with torch.no_grad():
            out = net(x)
        out = out.detach().cpu().numpy()
        x = x.detach().cpu().numpy()
        plt.figure(1)
        out = np.argmax(out, axis=1)
        out = np.squeeze(out, 0)
        out = (out - np.min(out)) / (np.max(out) - np.min(out))
        ax1 = plt.subplot(1, 3, 2)
        ax1.imshow(y[0])
        ax2 = plt.subplot(1, 3, 1, sharex=ax1, sharey=ax1)
        x = (x + 1) / 2
        ax2.imshow(np.transpose(x[0], (1, 2, 0)))
        ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1)
        ax3.imshow(out)
        # plt.suptitle(label, fontsize="large")
        plt.show()
Example #12
def main():
    # file_structure = check_directories()
    # if file_structure == -1:
    #     print('\nERROR: Directories can\'t be created, error thrown')
    #     return -1
    # else:
    #     print('\nDirectories created successfully...\nLaunching camera module...')
    net = Net()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #
    # dev = torch.cuda.is_available()
    # # Assume that we are on a CUDA machine, then this should print a CUDA device:
    #
    # print(dev)

    net.to(device)
    net.load_state_dict(torch.load('saved_models/keypoints_model_2.pt'))

    ## prepare the net for testing
    net.eval()

    # net.load_state_dict(torch.load('saved_models/keypoints_model_2.pt'))

    # Fire camera & launch streams
    # pyrs.start()
    serv = pyrs.Service()
    # cam = pyrs.Device(device_id = 0, streams = [pyrs.stream.ColorStream(fps=60),
    #                                             pyrs.stream.DepthStream(fps=60),
    #                                             pyrs.stream.CADStream(fps=60),
    #                                             pyrs.stream.DACStream(fps=60)])
    cam = serv.Device(
        device_id=0,
        streams=[
            pyrs.stream.ColorStream(fps=60),
            # pyrs.stream.DepthStream(fps=60),
            # pyrs.stream.CADStream(fps=60),
            # pyrs.stream.DACStream(fps=60)
        ])
    # scale = cam.depth_scale

    # Some important variables
    flag_save_frames = False
    file_num = 0
    # cap = cv2.VideoCapture(0)

    # Define the codec and create VideoWriter object
    # fourcc = cv2.cv.CV_FOURCC(*'DIVX')
    # out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
    # out = cv2.VideoWriter('./output.avi', -1, 20.0, (640, 480))
    # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
    out = cv2.VideoWriter('output_4.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20,
                          (640, 480))

    # Start fetching Buffer
    print('Starting Buffer...')
    # load the face detector once, outside the frame loop
    face_cascade = cv2.CascadeClassifier(
        'detector_architectures/haarcascade_frontalface_default.xml')
    i = 1000
    while i > 0:
        cam.wait_for_frames()
        image_1 = cam.color[:, :, ::-1]
        gray_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY)
        faces_1 = face_cascade.detectMultiScale(gray_1, 1.1, 5)

        # make a copy of the original image to plot detections on
        image_with_detections_1 = image_1.copy()

        # loop over the detected faces, mark the image where each face is found
        for (x, y, w, h) in faces_1:
            # face = gray_1
            roi = gray_1[y:y + int(h), x:x + int(w)]
            org_shape = roi.shape
            roi = roi / 255.0

            roi = cv2.resize(roi, (224, 224))
            # image_plot = np.copy(roi)
            roi = roi.reshape(roi.shape[0], roi.shape[1], 1)
            roi = np.transpose(roi, (2, 0, 1))
            # Variable is deprecated; move the tensor to the selected device
            roi = torch.from_numpy(roi).float().to(device)
            roi = roi.unsqueeze(0)
            predicted_key_pts = net(roi)
            predicted_key_pts = predicted_key_pts.view(68, -1)
            predicted_key_pts = predicted_key_pts.data
            predicted_key_pts = predicted_key_pts.cpu().numpy()
            predicted_key_pts = predicted_key_pts * 50.0 + 100

            predicted_key_pts[:, 0] = predicted_key_pts[:, 0] * org_shape[
                0] / 224 + x
            predicted_key_pts[:, 1] = predicted_key_pts[:, 1] * org_shape[
                1] / 224 + y

            # cv2.rectangle(image_with_detections_1, (x, y), (x + w, y + h), (0, 0, 255), 3)

            for (x_point, y_point) in zip(predicted_key_pts[:, 0],
                                          predicted_key_pts[:, 1]):
                # cv2.circle expects integer pixel coordinates
                cv2.circle(image_with_detections_1,
                           (int(x_point), int(y_point)), 3, (0, 255, 0), -1)

        # current_color = cam.color[:, :, ::-1]
        # current_depth = cam.depth * scale
        # current_cad = cam.cad[:, :, ::-1]
        # current_dac = cam.dac * scale
        out.write(image_with_detections_1)
        cv2.imshow('Color', image_with_detections_1)
        # cv2.imshow('Depth', current_depth)
        # cv2.imshow('CAD', current_cad)
        # cv2.imshow('DAC', current_dac)

        # if flag_save_frames:
        #     num = format(file_num, '08')
        #     cv2.imwrite('./data/depth/' + str(num) + '.png', cam.depth)
        #     cv2.imwrite('./data/color/' + str(num) + '.png', current_color)
        #     cv2.imwrite('./data/dac/' + str(num) + '.png', cam.dac)
        #     cv2.imwrite('./data/cad/' + str(num) + '.png', current_cad)
        #     file_num += 1
        i = i - 1
        k = cv2.waitKey(1)
        if k == ord('q'):
            print('Q Pressed...\nEnding execution')
            break
        if k == ord('f'):
            if flag_save_frames:
                print('F Pressed...\nStopped fetching frames...')
                flag_save_frames = False
            else:
                print('F Pressed...\nStarted fetching frames...')
                flag_save_frames = True

    cam.stop()
    # pyrs.stop()
    out.release()
    serv.stop()
    return 0
Example #13
def train(options):
    exp_name = options['exp_name']
    batch_size = options['batch_size']
    use_pca = options['use_pca']
    model_type = options['model_type']
    loss_fn = options['loss_fn']
    optim = options['optim']
    use_scheduler = options['use_scheduler']
    lr = options['lr']
    epochs = options['epochs']
    pca_var_hold = options['pca_var_hold']
    debug_mode = options['debug_mode']
    
    if os.path.exists(exp_name):
        shutil.rmtree(exp_name)

    time.sleep(1)
    writer = SummaryWriter(exp_name, flush_secs=1)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    X = os.listdir('hilbert_data')
    X_train = X[:int(0.8*len(X))]
    X_test = X[int(0.8*len(X)):]
    # X = np.load('bined_x.npy')
    # y = np.load('bined_y.npy')
    # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # if use_pca and 'Raw' in exp_name:
    #     scaler = PCA(pca_var_hold)
    #     scaler.fit(X_train)
    #     X_train = scaler.transform(X_train)
    #     X_test = scaler.transform(X_test)

    # needed_dim = X_train.shape[1]

    dataset_train = HIL_MOOD(X_train, model_type=model_type, data_type='train', debug_mode=debug_mode)
    train_loader = DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True)

    dataset_val = HIL_MOOD(X_test, model_type=model_type, data_type='val')
    valid_loader = DataLoader(dataset=dataset_val, batch_size=batch_size, shuffle=False)
    
    model = Net()
    model.to(device)
    if optim is None:
        print('you need to specify an optimizer')
        exit()
    elif optim == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif optim == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    if use_scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', verbose=True, threshold=0.0001, patience=10)
    if loss_fn is None:
        print('you need to specify a loss function')
        exit()
    elif loss_fn == 'mse':
        loss_fn = torch.nn.MSELoss()
    elif loss_fn == 'cross_entropy':
        loss_fn = torch.nn.CrossEntropyLoss()
    
    
    
    mean_train_losses = []
    mean_valid_losses = []
    valid_acc_list = []
    best = float('inf')  # lowest validation loss so far (the check below is "<")
    
    for epoch in range(epochs):
        model.train()
        train_losses = []
        valid_losses = []
        for i, (images, labels) in enumerate(train_loader):
            if images.shape[0] != batch_size:
                continue
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()

            # print(images.shape)
            outputs = model(images)
            # print(images.shape)
            # print(outputs.shape)
            # print(labels.shape)
            # print(i)
            loss = loss_fn(outputs, labels)
            # print('loss: ', loss.item())
            writer.add_scalar('Loss/train', loss.item(),
                              len(train_loader) * epoch + i)

            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
            del outputs
            # if (i * batch_size) % (batch_size * 100) == 0:
            #     print(f'{i * batch_size} / 50000')
                
        model.eval()
        correct_5_2 = 0
        correct_5_1 = 0
        
        total_loss = 0
        total = 0
        accsat = [0.5, 0.05, 0.005]
        accs = np.zeros(len(accsat))
        # corrs = np.zeros(len(accsat))
        correct_array = np.zeros(len(accsat))
        with torch.no_grad():
            for i, (images, labels) in enumerate(valid_loader):
                images = images.to(device)
                labels = labels.to(device)
                outputs = model(images)
                loss =  loss_fn(outputs, labels)

                
                for k in range(len(accsat)):
                    correct_array[k] += accat(outputs, labels, thresh=accsat[k])

                # total_loss += loss.item()
                total += labels.size(0)
                valid_losses.append(loss.item())
        mean_train_losses.append(np.mean(train_losses))
        mean_valid_losses.append(np.mean(valid_losses))
        # scheduler.step(np.mean(valid_losses))
        for i in range(len(accsat)):
            accs[i] = 100 * correct_array[i] / total
            writer.add_scalar('Acc/val_@' + str(accsat[i]), accs[i], epoch)
        
        if np.mean(valid_losses) < best:
            best = np.mean(valid_losses)
            torch.save(model.state_dict(),
                       os.path.join(os.getcwd(), 'models', 'meh.pth'))
        
        writer.add_scalar('Loss/val', np.mean(valid_losses), epoch)
        # valid_acc_list.append(accuracy)
        if epoch == epochs - 1:
            print('epoch : {}, train loss : {:.4f}, valid loss : {:.4f}, acc@0.05 : {:.4f}'
                  .format(epoch + 1, np.mean(train_losses),
                          np.mean(valid_losses), accs[1]))
Example #14
def train_net(args):
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = 0
    writer = SummaryWriter()
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        # model = ImgClsModel()
        model = Net()
        model = nn.DataParallel(model)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    logger = get_logger()

    # Move to GPU, if available
    model = model.to(device)

    # Loss function
    criterion = nn.BCELoss()

    logger.info('init dataset...')
    # Custom dataloaders
    dataset = YooChooseBinaryDataset(root='data/')
    #dataset = dataset.shuffle()
    train_dataset = dataset[:800000]
    val_dataset = dataset[800000:900000]
    test_dataset = dataset[900000:]
    print(len(train_dataset), len(val_dataset), len(test_dataset))
    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        # One epoch's training
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch,
                           logger=logger)

        writer.add_scalar('model/train_loss', train_loss, epoch)

        # One epoch's validation
        train_acc = evaluate(train_loader, model)
        val_acc = evaluate(val_loader, model)
        test_acc = evaluate(test_loader, model)
        print(
            'Epoch: {:03d}, Loss: {:.5f}, Train Auc: {:.5f}, Val Auc: {:.5f}, Test Auc: {:.5f}'
            .format(epoch, train_loss, train_acc, val_acc, test_acc))

        writer.add_scalar('model/train_acc', train_acc, epoch)
        writer.add_scalar('model/val_acc', val_acc, epoch)
        writer.add_scalar('model/test_acc', test_acc, epoch)

        # Check if there was an improvement
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        best_acc, is_best)
Example #15
def create_model(args):
    model = Net()
    model.load_pretrained(args.weights)
    return model.to(args.device)
Example #16
def train_net(args):
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = 0
    writer = SummaryWriter()
    epoch_since_improvement = 0

    if checkpoint is None:
        model = Net()
        # model = nn.DataParallel(model)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epoch_since_improvement = checkpoint['epoch_since_improvement']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    logger = get_logger()

    model = model.to(device)

    criterion = nn.BCELoss()

    logger.info('init dataset...')
    dataset = YoochooseBinaryDataset(root='data/')
    dataset = dataset.shuffle()
    train_dataset = dataset[:800000]
    val_dataset = dataset[800000:900000]
    test_dataset = dataset[900000:]

    train_loader = DataLoader(train_dataset, batch_size=batch_size)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    test_loader = DataLoader(test_dataset, batch_size=batch_size)

    for epoch in range(start_epoch, args.end_epoch):
        train_loss = train(train_loader=train_loader,
                           model=model,
                           criterion=criterion,
                           optimizer=optimizer,
                           epoch=epoch,
                           logger=logger)
        writer.add_scalar('model/train_loss', train_loss, epoch)

        train_acc = evalute(train_loader, model)
        val_acc = evalute(val_loader, model)
        test_acc = evalute(test_loader, model)
        # print('epoch:{:03d},loss:{:.5f},train acc:{:.5f},val acc:{:.5f},test acc:{:.5f}'.format(epoch,train_acc,val_acc,test_acc))
        print(
            'Epoch: {:03d}, Loss: {:.5f}, Train Auc: {:.5f}, Val Auc: {:.5f}, Test Auc: {:.5f}'
            .format(epoch, train_loss, train_acc, val_acc, test_acc))

        writer.add_scalar('model/train_acc', train_acc, epoch)
        writer.add_scalar('model/val_acc', val_acc, epoch)
        writer.add_scalar('model/test_acc', test_acc, epoch)

        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        if not is_best:
            epoch_since_improvement += 1
            print('\nEpochs since last improvement: %d\n' %
                  (epoch_since_improvement))
        else:
            epoch_since_improvement = 0

        save_checkpoint(epoch, epoch_since_improvement, model, optimizer,
                        best_acc, is_best)
Example #17
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 500 == 499:  # print every 500 mini-batches
            print('[Epoch: %d, Batch: %5d] Loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 500))
            running_loss = 0.0

net.eval()
net.to('cpu')
with torch.no_grad():
    correct = 0
    total = 0
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    accuracy = (correct / total) * 100
    print('Test Accuracy of the network on the 10000 test images: {} %'.format(
        accuracy))

if accuracy > 85:
Example #18
                          shuffle=True,
                          num_workers=0)
# create the test dataset
test_dataset = MyFacialKeyPointsDataset(
    csv_file='./data/test_frames_keypoints.csv',
    root_dir='./data/test',
    transform=data_transform)
# load the test data in batches
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=0)
# define the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Net()
net.to(device)


def net_sample_output():
    """
    在一个批次测试图像上测试模型
    :return:
    """
    for i, sample in enumerate(test_loader):
        images, key_pts = sample['image'], sample['keypoints']
        images = images.type(torch.FloatTensor)
        # 前向传播
        output_pts = net(images)
        # 将输出reshpe为68个坐标点
        output_pts = output_pts.view(output_pts.size()[0], 68, -1)
        # 测试一张
Example #19
def train(data_folder, output_folder, es_patience, epochs, TTA):
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    print("使用デバイス:", device)

    train = pd.read_csv(data_folder + '/train.csv')
    test = pd.read_csv(data_folder + '/test.csv')

    arch = EfficientNet.from_pretrained('efficientnet-b1')  # model

    meta_features = list(train.columns)
    meta_features.remove('image_name')
    meta_features.remove('target')
    meta_features.remove('fold')

    # various parameters
    oof = np.zeros((len(train), 1))
    preds = torch.zeros((len(test), 1), dtype=torch.float32, device=device)

    skf = KFold(n_splits=5, shuffle=True, random_state=47)

    #===========================
    # start of the fold loop
    #===========================
    for fold, (idxT, idxV) in enumerate(skf.split(np.arange(15)), 1):
        print('=' * 20, 'Fold', fold, '=' * 20)

        #kfold
        train_idx = train.loc[train['fold'].isin(idxT)].index
        val_idx = train.loc[train['fold'].isin(idxV)].index

        # training parameters
        model_path = f'/model_{fold}.pth'
        best_val = 0
        patience = es_patience

        model = Net(arch=arch, n_meta_features=len(meta_features))

        model = model.to(device)

        #optimizer
        optim = torch.optim.Adam(model.parameters(), lr=0.001)
        #scheduler
        scheduler = ReduceLROnPlateau(optimizer=optim,
                                      mode='max',
                                      patience=1,
                                      verbose=True,
                                      factor=0.2)

        criterion = nn.BCEWithLogitsLoss()

        trainDataset = MelanomaDataset(
            df=train.iloc[train_idx].reset_index(drop=True),
            imfolder=data_folder + '/train',
            train=True,
            transforms=train_transform,
            meta_features=meta_features)

        valDataset = MelanomaDataset(
            df=train.iloc[val_idx].reset_index(drop=True),
            imfolder=data_folder + '/train',
            train=True,
            transforms=test_transform,
            meta_features=meta_features)

        testDataset = MelanomaDataset(df=test,
                                      imfolder=data_folder + '/test',
                                      train=False,
                                      transforms=test_transform,
                                      meta_features=meta_features)

        train_loader = DataLoader(dataset=trainDataset,
                                  batch_size=64,
                                  shuffle=True,
                                  num_workers=2)
        val_loader = DataLoader(dataset=valDataset,
                                batch_size=16,
                                shuffle=False,
                                num_workers=2)
        test_loader = DataLoader(dataset=testDataset,
                                 batch_size=16,
                                 shuffle=False,
                                 num_workers=2)

        #=====================
        # epochs
        #=====================
        for epoch in range(epochs):
            start_time = time.time()
            correct = 0
            epoch_loss = 0

            #train_loop
            model.train()
            for x, y in train_loader:
                # .to() avoids the copy-construct warning raised by
                # torch.tensor() on an existing tensor
                x[0] = x[0].to(device, dtype=torch.float32)
                x[1] = x[1].to(device, dtype=torch.float32)
                y = y.to(device, dtype=torch.float32)
                optim.zero_grad()
                z = model(x)
                loss = criterion(z, y.unsqueeze(1))
                loss.backward()
                optim.step()
                pred = torch.round(torch.sigmoid(z))
                correct += (pred.cpu() == y.cpu().unsqueeze(1)).sum().item()
                epoch_loss += loss.item()
            train_acc = correct / len(train_idx)

            model.eval()
            val_preds = torch.zeros((len(val_idx), 1),
                                    dtype=torch.float32,
                                    device=device)

            with torch.no_grad():
                #validation_loop
                for j, (x_val, y_val) in enumerate(val_loader):
                    x_val[0] = x_val[0].to(device, dtype=torch.float32)
                    x_val[1] = x_val[1].to(device, dtype=torch.float32)
                    y_val = y_val.to(device, dtype=torch.float32)
                    z_val = model(x_val)
                    val_pred = torch.sigmoid(z_val)
                    val_preds[j *
                              val_loader.batch_size:j * val_loader.batch_size +
                              x_val[0].shape[0]] = val_pred

                val_acc = accuracy_score(train.iloc[val_idx]['target'].values,
                                         torch.round(val_preds.cpu()))
                val_roc = roc_auc_score(train.iloc[val_idx]['target'].values,
                                        val_preds.cpu())

                print(
                    'Epoch{:03}: | Loss:{:.3f} | Train acc:{:.3f} | Val acc:{:.3f} | Val roc_auc:{:.3f} | Training time:{}'
                    .format(
                        epoch + 1, epoch_loss, train_acc, val_acc, val_roc,
                        str(
                            datetime.timedelta(seconds=time.time() -
                                               start_time))[:7]))

                scheduler.step(val_roc)

                if val_roc >= best_val:
                    best_val = val_roc
                    patience = es_patience

                    torch.save(model, output_folder + model_path)

                else:
                    patience -= 1
                    if patience == 0:
                        print('Early stopping. Best Val roc_auc:{:.3f}'.format(
                            best_val))
                        break

        model = torch.load(output_folder + model_path)
        model.eval()
        val_preds = torch.zeros((len(val_idx), 1),
                                dtype=torch.float32,
                                device=device)

        #evaluation loop
        with torch.no_grad():
            for j, (x_val, y_val) in enumerate(val_loader):
                x_val[0] = x_val[0].to(device, dtype=torch.float32)
                x_val[1] = x_val[1].to(device, dtype=torch.float32)
                y_val = y_val.to(device, dtype=torch.float32)
                z_val = model(x_val)
                val_pred = torch.sigmoid(z_val)
                val_preds[j * val_loader.batch_size:j * val_loader.batch_size +
                          x_val[0].shape[0]] = val_pred
            oof[val_idx] = val_preds.cpu().numpy()

            for _ in range(TTA):
                for i, x_test in enumerate(test_loader):
                    x_test[0] = x_test[0].to(device, dtype=torch.float32)
                    x_test[1] = x_test[1].to(device, dtype=torch.float32)
                    z_test = model(x_test)
                    z_test = torch.sigmoid(z_test)
                    preds[i *
                          test_loader.batch_size:i * test_loader.batch_size +
                          x_test[0].shape[0]] += z_test

            # average over the TTA passes once, after the loop completes
            preds /= TTA

    preds /= skf.n_splits

    return preds, oof
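# A hedged usage sketch for scoring the out-of-fold predictions returned above
# (assumes train.csv carries the binary 'target' column used during training;
# the argument values are illustrative):
#     from sklearn.metrics import roc_auc_score
#     preds, oof = train(data_folder, output_folder, es_patience=3,
#                        epochs=15, TTA=3)
#     train_df = pd.read_csv(data_folder + '/train.csv')
#     print('CV roc_auc:', roc_auc_score(train_df['target'].values, oof))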
Example #20
    # print (hyperopt.pyll.stochastic.sample(space))

    # minimize the objective over the space
    trials = Trials()
    best = fmin(ag.objective, space, algo=tpe.suggest, max_evals=10, trials=trials)

    print(best)
    print(trials.results)
    print(trials.losses())
    print(space_eval(space, best))
    num_of_neurons_list = [a['misc']['vals']['num_of_neurons'][0] for a in trials.trials]
    num_of_hidden_layers_list = [a['misc']['vals']['num_of_hidden_layers'][0] for a in trials.trials]
    num_of_epochs = [a['misc']['vals']['epochs'][0] for a in trials.trials]
    learning_rate_list = [a['misc']['vals']['learning_rate'][0] for a in trials.trials]
    for i, trial in enumerate(trials.losses()):
        print ("Experiment {:2}: Hidden Layers: {:3}, Neurons: {:3}, Learing Rate: {:3}, Epochs: {:3}, Loss: {:1.5}".
               format(i, num_of_hidden_layers_list[i], num_of_neurons_list[i],learning_rate_choices[learning_rate_list[i]],num_of_epochs[i], trial))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter3D(num_of_neurons_list, num_of_hidden_layers_list, trials.losses())
    ax.set_xlabel('#Neurons')
    ax.set_ylabel('#Layers')
    ax.set_zlabel('Loss')
    plt.show()
    best_list.append(best)
print(best_list)
net = Net(num_of_neurones=int(best['num_of_neurons']),
          num_of_hidden_layers=int(best['num_of_hidden_layers']),
          num_of_inputs=ag.hw_model.inputs,
          num_of_outputs=ag.hw_model.outputs)
net = net.to(ag.device)
print(ag.test(net))
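# `space` and `learning_rate_choices` are not defined in this snippet; a
# sketch of what they might look like, given the keys read back above (the
# hp.choice index stored in trials explains the learning_rate_choices lookup):
#     from hyperopt import hp
#     learning_rate_choices = [0.0001, 0.001, 0.01]
#     space = {
#         'num_of_neurons': hp.quniform('num_of_neurons', 10, 200, 1),
#         'num_of_hidden_layers': hp.quniform('num_of_hidden_layers', 1, 5, 1),
#         'epochs': hp.quniform('epochs', 5, 50, 5),
#         'learning_rate': hp.choice('learning_rate', learning_rate_choices),
#     }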