Example #1
0
            # NOTE(review): fragment of a k-fold cross-validation training loop.
            # The enclosing function, the epoch loop (`e`), the fold loop
            # (`fold`), `train_loader`, `model`, `optimizer`, `make_batch`,
            # the focal-loss helpers and `device` are all defined outside
            # this excerpt.
            val_loader = DataLoader(dset,
                                    batch_size=batch_size,
                                    sampler=val_subsampler,
                                    collate_fn=dset_og.custom_collate_fn)

            num_data = 0
            for i, (imgs, anns) in enumerate(train_loader):
                num_data += len(imgs)
                num_train_batches += 1

                # Progress line: [epoch/epochs] (fold/folds) seen/approx-split-size.
                print(f'[{e + 1}/{num_epochs}] ', end='')
                print(f'({fold + 1}/{num_split}) ', end='')
                print(f'{num_data}/{int(len(dset) * (1 - 1 / num_split))}  ',
                      end='')

                x = make_batch(imgs).to(device)
                y_age = make_batch(anns, 'age_categorical').to(device)
                y_gender = make_batch(anns, 'gender_categorical').to(device)

                # Two-headed model: one forward pass yields age and gender logits.
                pred_age, pred_gender = model(x)
                loss_age = custom_weighted_focal_loss(pred_age, y_age,
                                                      weight_factor_age, 2)
                loss_gender = custom_focal_loss(pred_gender, y_gender, 2)

                loss = loss_age + loss_gender

                optimizer.zero_grad()
                # BUG FIX: the original called `loss_age.backward()`, so the
                # gender head never received gradients even though the combined
                # loss was computed above.  Backpropagate the combined loss
                # (matching the other training loops in this collection).
                loss.backward()
                optimizer.step()

                # Detach to a plain float for logging so the autograd graph
                # can be freed.
                loss = loss.detach().cpu().item()
Example #2
0
        # NOTE(review): fragment of a single-epoch training loop; the epoch
        # counter `e`, `num_epochs`, `train_loader`, `n_train_data`, `model`,
        # `make_batch`, `loss_func`, `optimizer` and `device` come from the
        # surrounding (unseen) scope, and the loop body continues past this
        # excerpt.
        num_data = 0
        num_batches = 0
        train_loss = 0
        train_acc_age = 0
        train_acc_gender = 0

        t_train_start = time.time()
        model.train()  # enable training-mode behaviour (dropout/batch-norm)
        for i, (imgs, anns) in enumerate(train_loader):
            num_data += len(imgs)
            num_batches += 1

            # Progress line: [epoch/epochs] seen/total.
            print('[{}/{}] '.format(e + 1, num_epochs), end='')
            print('{}/{} '.format(num_data, n_train_data), end='')

            x = make_batch(imgs).to(device)
            # Earlier per-task targets, kept commented for reference:
            # y_gender = make_batch(anns, 'gender').to(device)
            # y_age = make_batch(anns, 'age').to(device)
            y = make_batch(anns).to(device)

            # Earlier two-headed variant, kept commented for reference:
            # pred_age, pred_gender = model(x)
            # loss_age = custom_softmax_cross_entropy_loss(pred_age, y_age)
            # loss_gender = custom_softmax_cross_entropy_loss(pred_gender, y_gender)
            predict = model(x)
            # NOTE(review): the extra arguments (5, .5) are presumably loss
            # hyper-parameters (e.g. a focusing factor and a weight) — confirm
            # against the definition of `loss_func`.
            loss = loss_func(predict, y, 5, .5)

            # loss = loss_age + loss_gender

            # Standard backprop step on the combined loss.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
                                    # NOTE(review): the three lines below are
                                    # the tail of a DataLoader(...) call whose
                                    # opening line was lost from this excerpt.
                                    batch_size=batch_size,
                                    sampler=val_subsampler,
                                    collate_fn=custom_collate_fn)

            num_datas = 0
            t_train_start = time.time()
            for i, (imgs, anns) in enumerate(train_loader):
                t_batch_start = time.time()
                num_train_batches += 1
                num_datas += len(imgs)
                # Progress line: [epoch] (fold) seen/approx-split-size.
                print(f'[{e + 1}/{num_epochs}] ', end='')
                print(f'({fold + 1}/{n_split} FOLD) ', end='')
                print(f'{num_datas}/{int(len(dset) * (1 - 1 / n_split))}  ',
                      end='')

                x = make_batch(imgs).to(device)

                predict_temp = model(x)
                predict_list = []
                y_list = []

                # Build per-sample prediction/target pairs from the batch
                # annotations (one dict per image with 'bbox' and
                # 'label_categorical' keys, as read below).
                for b in range(len(anns)):
                    ground_truth_box = anns[b]['bbox']
                    label_categorical = anns[b]['label_categorical']

                    # print(f'{label_int} {label_categorical} {name} {fn} {ground_truth_box / 32}')

                    # Fixed 416x416 input with a 1/32 downscale ratio —
                    # presumably mapping boxes onto a 13x13 detection grid
                    # (YOLO-style head).  TODO confirm against the model.
                    h_img, w_img = 416, 416
                    ratio_y, ratio_x = 1 / 32, 1 / 32
                    ground_truth_box = torch.as_tensor(ground_truth_box)
                    # NOTE(review): the body of this branch lies outside the
                    # excerpt — presumably it promotes a single box of shape
                    # (4,) to rank 2; verify in the full source.
                    if len(ground_truth_box.shape) < 2:
    # NOTE(review): excerpt of a multi-label classification training run.
    # `num_epochs`, `train_loader`, `dset_train`, `model`, `make_batch`,
    # `loss_func`, `exact_match_ratio`, `optimizer` and `device` are defined
    # outside this excerpt, and the loop body continues past its last line.
    t_start = time.time()
    for e in range(num_epochs):
        # Per-epoch accumulators for mean loss / exact-match-ratio reporting.
        loss_sum = 0
        emr_sum = 0
        num_batches = 0

        model.train()
        t_train_start = time.time()
        for i, (images, labels) in enumerate(train_loader):
            # Progress line: [epoch/epochs] seen/total.
            print('[{}/{}] '.format(e + 1, num_epochs), end='')
            print('{}/{} '.format((i + 1) * len(images), len(dset_train)),
                  end='')

            num_batches += 1

            x = make_batch(images).to(device)
            y = make_batch(labels, 'label').to(device)

            predict = model(x)
            # Sigmoid yields independent per-label probabilities (multi-label
            # setting).  NOTE(review): this assumes `loss_func` expects
            # probabilities (e.g. nn.BCELoss), not logits — confirm it is not
            # BCEWithLogitsLoss, which would double-apply the sigmoid.
            predict = torch.nn.Sigmoid()(predict)

            optimizer.zero_grad()
            loss = loss_func(predict, y)
            loss.backward()
            optimizer.step()

            # Threshold at 0.5 for hard predictions, then score the batch by
            # exact match (all labels of a sample must be correct).
            predict = (predict > .5)
            emr = exact_match_ratio(predict, y)

            loss_sum += loss.item()
            emr_sum += emr