Example No. 1
def get_pdfs(dataset: data.Dataset):
    pdf_x, pdf_t, pdf_xt = [Counter(), Counter(), Counter()]
    n_samples = dataset.__len__()

    for i in range(n_samples):
        (x, y) = dataset.__getitem__(i)
        print(x, y)
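The function above is cut off before the counters are filled. A minimal sketch of one plausible completion, assuming the goal is empirical marginal and joint distributions and that the items returned by __getitem__ are hashable (the name get_empirical_pdfs is hypothetical):

from collections import Counter
from torch.utils import data

def get_empirical_pdfs(dataset: data.Dataset):
    # Assumed intent: count every input, every target, and every
    # (input, target) pair, then normalize by the number of samples.
    pdf_x, pdf_t, pdf_xt = Counter(), Counter(), Counter()
    n_samples = len(dataset)

    for i in range(n_samples):
        x, t = dataset.__getitem__(i)
        pdf_x[x] += 1
        pdf_t[t] += 1
        pdf_xt[(x, t)] += 1

    def normalize(counter):
        return {k: v / n_samples for k, v in counter.items()}

    return normalize(pdf_x), normalize(pdf_t), normalize(pdf_xt)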
Example No. 2
def data_catogerize(data: Dataset, seed=1234):
    data_dict = {}
    for idx in range(len(data)):
        _, target = data.__getitem__(idx)
        if target not in data_dict:
            data_dict[target] = []
        data_dict[target].append(idx)
    rng = Random()
    rng.seed(seed)
    for key in data_dict.keys():
        rng.shuffle(data_dict[key])
    return data_dict
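data_catogerize returns a dict that maps each label to a shuffled list of sample indices, which is handy for building class-balanced subsets. A hedged usage sketch (balanced_subset, the k parameter, and the use of torch.utils.data.Subset are my additions):

from torch.utils.data import Subset

def balanced_subset(dataset, k, seed=1234):
    # Take the first k shuffled indices of every class and wrap them in a Subset.
    per_class = data_catogerize(dataset, seed=seed)
    indices = [idx for idxs in per_class.values() for idx in idxs[:k]]
    return Subset(dataset, indices)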
Example No. 3
def self_supervised(dataset: Dataset) -> Dataset:
    def duplicate(f):
        @wraps(f)
        def duplicated(index):
            x = f(index)
            if isinstance(x, tuple):
                x = x[0]
            return x, x

        return duplicated

    dataset.__getitem__ = duplicate(dataset.__getitem__)  # type: ignore
    return dataset
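Note that this patches __getitem__ on the instance, so only explicit dataset.__getitem__(i) calls see the duplicated pair; dataset[i] (and, typically, a DataLoader, which indexes the dataset) still resolves __getitem__ on the class and returns the original item. A small sketch with a hypothetical toy dataset illustrating both behaviours:

from torch.utils.data import Dataset

class Toy(Dataset):
    def __len__(self):
        return 4

    def __getitem__(self, index):
        return index, -index

ds = self_supervised(Toy())
print(ds.__getitem__(2))  # (2, 2): the patched instance attribute is used
print(ds[2])              # (2, -2): indexing looks __getitem__ up on the class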
Example No. 4
def evaluate():
    model = MultiLayerNet(hidden_size=256, layers=2)
    model.load_state_dict(torch.load('weights/model_hl2_hs256_bs2000_bn.pth'))
    model = model.to(device)
    val_dataset = Dataset('dataset/test2.json')
    points, labels = val_dataset.__getitem__(2)
    points = torch.unsqueeze(points, dim=0).to(device)

    model.eval()
    predicted_scores = model(points)     
    sigmoid = nn.Sigmoid()
    predicted_scores = sigmoid(predicted_scores)
    ones = torch.ones_like(predicted_scores)
    zeros = torch.zeros_like(predicted_scores)
    predicted_labels = torch.where(predicted_scores >= 0.3, ones, zeros)
    print(predicted_scores)
    print(predicted_labels)
    print(labels)
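evaluate binarizes the sigmoid scores at a 0.3 threshold with torch.where; the same step written as a small reusable helper, run without gradient tracking, might look like this (predict_labels and its default threshold are my own choices):

import torch

def predict_labels(model, points, threshold=0.3):
    # Forward pass without gradients, sigmoid, then binarize at the threshold.
    model.eval()
    with torch.no_grad():
        scores = torch.sigmoid(model(points))
    return scores, (scores >= threshold).float()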
Example No. 5
        # If there is any transform method, apply it to the image
        if self.transform:
            image = self.transform(image)

        return image, y


train_dataset = Dataset(csv_file=train_csv_file,
                        data_dir='data/training_data_pytorch/')
validation_data = Dataset(csv_file=validation_csv_file,
                          data_dir='data/validation_data_pytorch/')

samples = [53, 23, 10]

for i in samples:
    image, y = train_dataset.__getitem__(i)
    plt.imshow(image)
    plt.show()

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
composed = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

test_normalization = Dataset(csv_file=train_csv_file,
                             data_dir='data/training_data_pytorch/',
                             transform=composed)
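A quick hedged check that the composed transform really is applied by __getitem__ — the index 0 and the expected 3x224x224 shape are assumptions about this particular dataset:

image, y = test_normalization.__getitem__(0)
print(image.shape)                              # expected: torch.Size([3, 224, 224])
print(image.mean().item(), image.std().item())  # roughly centred values after Normalize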
Example No. 6
        model_conv1 = model_conv
        model_conv = model_conv.to(device)
        x = Image.open(
            'D:\\FinalProject\\input\\HAM10000_images_part_1\\ISIC_0024306.jpg'
        )
        image_tensor = test_transforms(x).float()
        image_tensor = image_tensor.unsqueeze_(0).to(device)
        output = model_conv1(image_tensor)
        index = output.data.cpu().numpy()
        print("Model {}".format(i))
        print(index)

        model_conv.eval()
        test_set = Dataset(validation_df, transform=composed)
        test_generator = data.SequentialSampler(test_set)
        result_array = []
        gt_array = []
        for i in test_generator:
            data_sample, y = test_set.__getitem__(i)
            #data_sample = data_sample.unsqueeze(0)
            data_gpu = data_sample.unsqueeze(0).to(device)
            #output = model_conv(data_sample)
            output = model_conv(data_gpu)
            result = torch.argmax(output)
            result_array.append(result.item())
            gt_array.append(y.item())
        correct_results = np.array(result_array) == np.array(gt_array)
        sum_correct = np.sum(correct_results)
        accuracy = sum_correct / test_generator.__len__()
        print(accuracy)
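The loop above indexes the dataset one sample at a time through SequentialSampler and __getitem__; a hedged batched equivalent using a DataLoader (the function name, batch size, and variable names are my choices):

import torch
from torch.utils.data import DataLoader

def evaluate_accuracy(model, dataset, device, batch_size=32):
    # Batched variant of the accuracy computation above.
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            outputs = model(inputs.to(device))
            preds = torch.argmax(outputs, dim=1)
            correct += (preds.cpu() == targets).sum().item()
            total += targets.size(0)
    return correct / total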
Example No. 7
		   , 1: 'predicted: $10'
		   , 2: 'predicted: $20'
		   , 3: 'predicted: $50'
		   , 4: 'predicted: $100'
		   , 5: 'predicted $200'
		   , 6: 'predicted $500'}
random.seed(0)
numbers = random.sample(range(70), 5)

# Type your code here

# def plot_random_image(numbers):
model.eval()
count = 1
for i in numbers:
	img, y = val_dataset.__getitem__(idx=i)
	print("Image " + str(count))
	val_image_name = val_data_dir + str(i) + ".jpeg"
	image = Image.open(val_image_name)
	plt.imshow(image)
	plt.show()
	print(look_up[y])
	shape = list(img.size())
	img = img.view(1, shape[0], shape[1], shape[2])
	z = model(img)
	_, yhat = torch.max(z.data, 1)
	count += 1
	if yhat.item() == y:
		print(" **************Correctly classified************* ")
	else:
		print("**************Misclassified**************")