Example 1
    def _get_data_iter(self) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
        transform = transforms.Compose([
            # transforms.Resize((150, 200), interpolation=transforms.InterpolationMode.LANCZOS),
            # Image.ANTIALIAS was removed in Pillow 10; InterpolationMode.LANCZOS is its modern equivalent
            transforms.Resize((227, 227), interpolation=transforms.InterpolationMode.LANCZOS),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        dataset = CatDogDataset(root="./data/train/", transform=transform)
        used_data_num = self.ct.train_data_num + self.ct.valid_data_num + self.ct.test_data_num
        data_train, data_valid, data_test, data_unused = random_split(
            dataset,
            [self.ct.train_data_num, self.ct.valid_data_num,
             self.ct.test_data_num, len(dataset) - used_data_num])

        iter_train = DataLoader(data_train, batch_size=self.ct.train_batch_size, shuffle=True)
        iter_train_eval = DataLoader(data_train, batch_size=self.ct.eval_batch_size, shuffle=False)
        iter_valid = DataLoader(data_valid, batch_size=self.ct.eval_batch_size, shuffle=False)
        iter_test = DataLoader(data_test, batch_size=self.ct.eval_batch_size, shuffle=False)

        return iter_train, iter_train_eval, iter_valid, iter_test
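
All of these examples assume a project-specific CatDogDataset class whose definition is not shown (and whose signature varies between snippets). As a rough point of reference only, a minimal ImageFolder-style sketch, in which every name and detail is an assumption rather than the original implementation, could look like this:

import os

from PIL import Image
from torch.utils.data import Dataset


class CatDogDataset(Dataset):
    # Minimal sketch: one flat folder of images, label 0 for cats and 1 for
    # dogs, inferred from file names such as "cat.123.jpg" / "dog.456.jpg".
    def __init__(self, root, transform=None):
        self.files = sorted(
            os.path.join(root, f) for f in os.listdir(root)
            if f.lower().endswith(('.jpg', '.jpeg', '.png')))
        self.transform = transform

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img = Image.open(self.files[idx]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        label = 0 if 'cat' in os.path.basename(self.files[idx]).lower() else 1
        return img, label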
Example 2
image_row_size = image_size[0] * image_size[1]
n_features = 4

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
    # transforms.Normalize(mean, std),
])

path = './Cat_Dog_data/train'
path1 = './Cat_Dog_data/test'
train = CatDogDataset(path, transform=transform)
test = CatDogDataset(path1, transform=transform)

shuffle = True
batch_size = 64
num_workers = 1

train_loader = DataLoader(dataset=train,
                          shuffle=shuffle,
                          batch_size=batch_size,
                          num_workers=num_workers)
test_loader = DataLoader(dataset=test,
                         shuffle=shuffle,
                         batch_size=batch_size,
                         num_workers=num_workers)
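
The Grayscale() plus Lambda(x.repeat(3, 1, 1)) pair above first collapses each image to a single channel and then tiles that channel three times, so models that expect 3-channel RGB input still accept the grayscale data. A quick self-contained shape check:

import torch

x = torch.rand(1, 64, 64)       # 1-channel tensor, as Grayscale() + ToTensor() would produce
print(x.repeat(3, 1, 1).shape)  # torch.Size([3, 64, 64])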
Example 3
mean = [0.485, 0.456, 0.406]
std  = [0.229, 0.224, 0.225]
transform = transforms.Compose([
                                transforms.Resize(image_size), 
                                transforms.Grayscale(),
                                transforms.ToTensor(), 
                                transforms.Lambda(lambda x: x.repeat(3,1,1)),
                                transforms.Normalize(mean, std)])


#Create Dataset


path    = '/home/aims/aims-courses/Deep learning/asign/data/train/'
dataset = CatDogDataset(path, transform=transform)

path1 = '/home/aims/aims-courses/Deep learning/asign/data/val/'
test1 = CatDogDataset(path1, transform=transform)


# In[7]:



#Create DataLoader
shuffle     = True
batch_size  = 32
num_workers = 0
dataloader  = DataLoader(dataset=dataset,
                         shuffle=shuffle,
                         batch_size=batch_size,
                         num_workers=num_workers)

Example 4
class CatDogConfig(Settings):
    pass

config = CatDogConfig("settings")

# import the dataset and its preprocessing utilities
import logging

import numpy as np

from dataset import Preprocess_img, read_img, CatDogDataset

logging.basicConfig(level=logging.INFO)

# output path
EXPORTED_PATH = "preprocessor.json"  # copy preprocessor.json to the S3 `${target_url}/{VERSION}/model/`

preprocessor = Preprocess_img.load_from(EXPORTED_PATH)
cat_dog_dataset = CatDogDataset()

class dogcat_service(TfServingBaseService):
  # Changed to match the model input preprocessing
  def _preprocess(self, data):
    preprocessed_data = {}
    for k, v in data.items():
      for file_name, file_content in v.items():
        img = read_img(file_content)
        img, _ = preprocessor(img)
        img = img[np.newaxis, :, :, :]
        preprocessed_data[k] = img
    return preprocessed_data

  def _postprocess(self, data):
    outputs = {}

Example 5
import os
import timeit

import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score


def Program(raw_args):
    FLAGS = parse_args(raw_args)

    # update config object
    config.EPOCHS = FLAGS.max_epochs
    config.BATCH_SIZE = FLAGS.batch_size
    config.GPUS = FLAGS.num_gpus

    # sync files from S3 to local storage unit
    if CloudAPI is not None:
        CloudAPI.copy_parallel(FLAGS.data_url, config.DATA_DIR)

    # Load Models
    SAVER = "{}/catdog".format(config.OUTPUT_DIR)

    if not os.path.isdir(SAVER):
        os.makedirs(SAVER)

    model = VGG16("training", config, SAVER)
    logging.info(model.summary())

    # Load pretrained weights, see `notebooks/ModelArts-Explore_ex1`
    check_point = "{}/weights.best.checkpoint.hdf5".format(SAVER)
    if os.path.isfile(check_point):
        model.load_weights(check_point)
    else:
        model.load_weights(config.CAT_DOG_PRETRAINED_MODEL)

    # Prepare data
    from dataset import CatDogDataset, Preprocess_img
    cat_dog_dataset = CatDogDataset(name=FLAGS.dataset_name)

    cat_dog_dataset.load_dataset()
    X_train, y_train = cat_dog_dataset.train_reader()
    X_test, _ = cat_dog_dataset.validation_reader()

    # Training
    start = timeit.default_timer()
    # For large datasets we prefer SGD so the data is digested quickly
    model.fit(X_train, y_train, optimizer_type="sgd")
    elapsed = timeit.default_timer() - start
    logging.info("Training complete, elapsed: %s(s)" % elapsed)

    predictions = []
    detected = model.infer(X_test)

    for ret in detected:
        predictions.append(np.argmax(ret))

    df = pd.DataFrame({
        'data': cat_dog_dataset.test_data,
        'labels': cat_dog_dataset.test_labels,
        'prediction': predictions
    })

    print("evaluation snapshot, top 10: ", df.head(10))

    acc = accuracy_score(cat_dog_dataset.test_labels, predictions)

    print('Accuracy of the trained cat/dog recognition model (pure VGG16):', acc)

    # save accuracy to a local file
    metric_file_name = os.path.join(SAVER, 'metric.json')
    metric_file_content = """
{"total_metric": {"total_metric_values": {"accuracy": %0.4f}}}
    """ % acc

    with open(metric_file_name, "w") as f:
        f.write(metric_file_content)

    model_proto = "{}/model".format(SAVER)
    if os.path.isdir(model_proto):
        os.system('rm -rf %s' % model_proto)
    save_keras_model_to_serving(model.model, model_proto)

    EXPORTED_PATH = "{}/model/preprocessor.json".format(SAVER)
    logging.info("persist preprocessor data to %s" % EXPORTED_PATH)
    cat_dog_dataset.preprocessor.save(EXPORTED_PATH)

    # copy local output to remote S3 storage unit
    if CloudAPI is not None:
        CloudAPI.copy_parallel(SAVER, FLAGS.train_url)

    # check
    preprocessor = Preprocess_img()
    preprocessor.load_from(EXPORTED_PATH)
Example 6

image_size = (64, 64)
image_row_size = image_size[0] * image_size[1] * 3

mean = [0.485, 0.456, 0.406]
std  = [0.229, 0.224, 0.225]
transform = transforms.Compose([
                                transforms.Resize(image_size),
                                transforms.ToTensor(),
                                transforms.Normalize(mean, std)])



path    = '/home/aims/Downloads/cat-and-dog/'
train_data = CatDogDataset(path+'training_set/training_set/',  transform=transform)
test_data = CatDogDataset(path+'test_set/test_set/',  transform=transform)


net =  CNN()


train_loader = torch.utils.data.DataLoader(train_data, batch_size=64,
                                           shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=64,
                                          shuffle=False, num_workers=4)


criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
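
Example 6 stops right after defining the loss and optimizer. Continuing the snippet above, a minimal training loop (the epoch count is an arbitrary illustration, not taken from the original) could be:

for epoch in range(5):
    running_loss = 0.0
    for inputs, labels in train_loader:
        optimizer.zero_grad()            # reset gradients accumulated by the previous step
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('epoch {}: mean loss {:.4f}'.format(epoch + 1, running_loss / len(train_loader)))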
Example 7
    test_data_transforms = transforms.Compose([
        transforms.Resize(256),  # transforms.Scale was removed from torchvision; Resize replaces it
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    use_gpu = torch.cuda.is_available()

    batch_size = 32
    num_workers = 8

    test_file = '/Users/demerzel/PycharmProjects/cat-dog/data/test.txt'

    test_dataset = CatDogDataset(file_path=test_file,
                                 model='test',
                                 data_transforms=test_data_transforms)

    test_dataloader = DataLoader(test_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=num_workers)

    test_datasize = len(test_dataset)

    num_classes = 2

    model = AlexNet(num_classes=num_classes)
    model = torch.load('./output/epoch_70.pkl')  # loads the pickled model, replacing the fresh AlexNet above

    if use_gpu:
Example 8
from dataset import CatDogDataset
import matplotlib.pyplot as plt

dir_images = 'train/sample'


def mostrarImagen(dataset, nroImagen):
    # Show one sample: fetch (image, label), convert the CHW tensor to HWC for matplotlib.
    imagen, etiqueta = dataset[nroImagen]
    imagen = imagen.numpy()
    imagen = imagen.transpose(1, 2, 0)
    print(etiqueta)
    plt.imshow(imagen)
    plt.title(etiqueta)
    plt.show()


catdog_dataset = CatDogDataset(data_dir=dir_images)

mostrarImagen(catdog_dataset, 2)
Example 9
    return np


image_size = (224, 224)
image_row_size = image_size[0] * image_size[1]

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

path = 'train'
dataset = CatDogDataset(path, transform=transform)
path_test = 'test'
data_test = CatDogDataset(path_test, transform=transform)

shuffle = True
batch_size = 20
num_workers = 0
train_loader = DataLoader(dataset=dataset,
                          shuffle=shuffle,
                          batch_size=batch_size,
                          num_workers=num_workers)

test_loader = DataLoader(dataset=data_test,
                         shuffle=False,
                         batch_size=batch_size,
                         num_workers=num_workers)
Example 10
args = parser.parse_args()

# Configuration
epochs = args.epoch  # number of epochs (iterations)
batch_size = 84  # number of files per training batch
test_proportion = .2  # share of the files to use for testing (e.g. 20%)
validation_proportion = .1  # share of the files to use for validation (e.g. 10%)
img_size = 32  # resize applied to the dataset (e.g. 32x32 px)
padding_mode = args.padding  # padding mode used to make the images square
dataset_path = args.path  # path to the images
FILENAME_MODEL = 'gato_{:d}.pt'.format(epochs)
FILENAME_MODEL2 = 'gato_{:d}_model.pt'.format(epochs)

# Datasets
catdog_dataset = CatDogDataset(data_dir=dataset_path,
                               img_size=img_size,
                               padding_mode=padding_mode)
len_dataset = len(catdog_dataset)

test_size = int(test_proportion * len_dataset)
validation_size = int(validation_proportion * len_dataset)
train_size = len_dataset - test_size - validation_size

train_dataset, test_dataset, validation_dataset = torch.utils.data.random_split(
    catdog_dataset, [train_size, test_size, validation_size])

print("--- Configuración inicial ---")
print('Epochs      : {:d}'.format(epochs))
print('Batch Size  : {:d}'.format(batch_size))
print('Dataset     : {:d}'.format(len_dataset))
print('Dataset path: {:s}/'.format(dataset_path))
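
Example 10 ends before wrapping the three splits in loaders. A hedged continuation, shuffling only the training split as is conventional, could be:

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)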
Example 11
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    use_gpu = torch.cuda.is_available()

    batch_size = 32
    num_workers = 8

    train_file = '/Users/demerzel/PycharmProjects/cat-dog/data/train.txt'
    valid_file = '/Users/demerzel/PycharmProjects/cat-dog/data/valid.txt'
    test_file = '/Users/demerzel/PycharmProjects/cat-dog/data/test.txt'

    train_dataset = CatDogDataset(file_path=train_file,
                                  model='train',
                                  data_transforms=train_data_transforms)
    valid_dataset = CatDogDataset(file_path=valid_file,
                                  model='train',
                                  data_transforms=test_data_transforms)
    test_dataset = CatDogDataset(file_path=test_file,
                                 model='test',
                                 data_transforms=test_data_transforms)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)
Example 12
        np += p.nelement()
    return np


mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize(image_size),
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
    transforms.Normalize(mean, std)
])

path = '/home/aims/Documents/Tutors/Nando_assignment/data 1/train'
dataset = CatDogDataset(path, transform=transform)

path1 = '/home/aims/Documents/Tutors/Nando_assignment/data 1/val'
dataset1 = CatDogDataset(path1, transform=transform)

### Train
l1 = len(dataset)

print('the length of the train data is \n {}'.format(l1))

### Val
l2 = len(dataset1)

print('the length of the val data is \n {}'.format(l2))

### For the train data