Example #1
import logging
import math

import data_factory
import settings
from models import NeuralNetworkModel

logging.basicConfig(level=logging.INFO)

IMAGE_SIZE = 100
SHOW_BAR = False
MODEL_PATH = "./saved_model/model.pt"
data_dir = "./dataset"



# Build dataloaders and datasets for each split, then read off their metadata.
dataloaders, image_datasets = data_factory.load_data(data_dir)
dataset_sizes, class_names = data_factory.dataset_info(image_datasets)
num_classes = len(class_names)
data_parts = ['train', 'valid']

# Batches per split; math.ceil counts the final, possibly smaller batch.
num_batch = {
    part: math.ceil(dataset_sizes[part] / settings.batch_size)
    for part in data_parts
}
print('train_num_batch:', num_batch['train'])
print('valid_num_batch:', num_batch['valid'])

logging.info('data parts: %s', data_parts)
logging.info('train size: %d', dataset_sizes['train'])
logging.info('valid size: %d', dataset_sizes['valid'])
logging.info('classes: %s', class_names)
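
Example #1 depends on a project-local data_factory module that is not shown. Below is a minimal sketch of what load_data and dataset_info might look like, assuming a torchvision ImageFolder layout with train/ and valid/ subfolders under data_dir; the module body, transform choices, and image size are assumptions, not the original code.

# data_factory.py -- hypothetical sketch, not the original module
import torch
from torchvision import datasets, transforms

import settings


def load_data(data_dir):
    # Resize to a fixed square; 100 matches IMAGE_SIZE in the calling script (assumption).
    transform = transforms.Compose([
        transforms.Resize((100, 100)),
        transforms.ToTensor(),
    ])
    image_datasets = {
        part: datasets.ImageFolder(f"{data_dir}/{part}", transform)
        for part in ('train', 'valid')
    }
    dataloaders = {
        part: torch.utils.data.DataLoader(
            image_datasets[part],
            batch_size=settings.batch_size,
            shuffle=(part == 'train'),
        )
        for part in ('train', 'valid')
    }
    return dataloaders, image_datasets


def dataset_info(image_datasets):
    dataset_sizes = {part: len(ds) for part, ds in image_datasets.items()}
    class_names = image_datasets['train'].classes
    return dataset_sizes, class_names

Whatever the real module does, the calling code only relies on these two return shapes: dicts of dataloaders and datasets keyed by split, plus the sizes and class names derived from them.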
Example #2
        )
    # print(y)
    # print(y_target[0])
    print("")


if __name__ == "__main__":
    # Fix the random seeds so runs are reproducible.
    np.random.seed(0)
    torch.manual_seed(0)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Prepare the dataset; generate_layer_data() and load_data() are
    # project-local helpers defined elsewhere in this file.
    width, height = 64, 64
    generate_layer_data(n_sample=100)
    train_label, train_bbox, train_im, test_label, test_bbox, test_im = load_data()
    train_dataset = utils.TensorDataset(
        torch.from_numpy(train_im),
        torch.from_numpy(train_label),
        torch.from_numpy(train_bbox),
    )
    train_dataloader = utils.DataLoader(train_dataset,
                                        batch_size=8,
                                        shuffle=True)

    # test_dataset = utils.TensorDataset(test_tensor_y, test_tensor_x)
    # test_dataloader = utils.DataLoader(test_dataset, batch_size=32, shuffle=False)
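
    # Quick sanity check (not in the original code): every batch from
    # train_dataloader is an (images, labels, bboxes) triple, e.g.
    #   images, labels, bboxes = next(iter(train_dataloader))
    #   print(images.shape, labels.shape, bboxes.shape)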


    def _loss(y, y_target_class, y_target_bbox):
        cls_score, bbox = y
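        # The rest of _loss is not shown in this excerpt. A typical loss for a
        # (class scores, predicted boxes) pair -- an assumption, not necessarily
        # what the original file computes -- would combine classification and
        # box-regression terms:
        #   cls_loss = F.cross_entropy(cls_score, y_target_class)
        #   bbox_loss = F.smooth_l1_loss(bbox, y_target_bbox)
        #   return cls_loss + bbox_loss
        # (with torch.nn.functional imported as F at the top of the file).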