def tloaders(x_train,y_train,x_test,y_test,batch_size):
    """Wrap train/test arrays in TData datasets and return loaders.

    Returns (train_loader, test_loader); the train loader shuffles
    each epoch, the test loader keeps a fixed order.
    """
    train_set=tdata(x_train,y_train)
    test_set=tdata(x_test,y_test)
    make=lambda ds,flag:tdl(dataset=ds,
                            batch_size=batch_size,shuffle=flag)
    return make(train_set,True),make(test_set,False)
# MNIST, resized to 28x28 tensors, split 57k train / 3k validation.
img_size=28
random_seed=12
batch_size1=256
train_ids=torch.arange(0,57000)
valid_ids=torch.arange(57000,60000)
trans=transforms.Compose(
    [transforms.Resize((img_size,img_size)),
     transforms.ToTensor()])
train_valid=tmnist(root='data',train=True,download=True,
                   transform=trans)
train1=Subset(train_valid,train_ids)
valid1=Subset(train_valid,valid_ids)
test1=tmnist(root='data',train=False,transform=trans)
# Shuffle the training/validation loaders; evaluation order is fixed.
train_loader1=tdl(dataset=train1,batch_size=batch_size1,shuffle=True)
valid_loader1=tdl(dataset=valid1,batch_size=batch_size1,shuffle=True)
test_loader1=tdl(dataset=test1,batch_size=batch_size1,shuffle=False)

# Commented out IPython magic to ensure Python compatibility.
# %display_examples 1

# Download the letter-image H5 archive and open it for reading.
fpath='https://olgabelitskaya.github.io/'
zf='LetterColorImages_123.h5.zip'
# FIX: use context managers so both handles are closed even if the
# read or write raises (the original leaked them on error).
with urllib.request.urlopen(fpath+zf) as input_file:
    with open(zf,'wb') as output_file:
        output_file.write(input_file.read())
zipf=zipfile.ZipFile(zf,'r')
# NOTE(review): zipf is opened but never extracted or closed in this
# fragment -- presumably handled later; confirm.
# Example #3 (stray notebook-export artifact; original lines: "示例#3" / "0")
random_seed=12; batch_size=128
# BUG FIX: (.5) and (.25) are plain floats, not tuples -- a one-element
# tuple needs a trailing comma. transforms.Normalize expects per-channel
# sequences for mean and std.
tr0=(.5,); tr1=(.25,); img_size=28
trans=transforms.Compose(
    [transforms.Resize((img_size,img_size)),
     transforms.ToTensor(),
     transforms.Normalize(tr0,tr1)])
# MNIST split 54k train / 6k validation.
train_ids=torch.arange(0,54000)
valid_ids=torch.arange(54000,60000)
train_valid=tmnist(root='data',train=True,
                   download=True,transform=trans)
train=Subset(train_valid,train_ids)
valid=Subset(train_valid,valid_ids)
test=tmnist(root='data',train=False,
            transform=trans)
train_loader=tdl(dataset=train,shuffle=True,
                 batch_size=batch_size)
valid_loader=tdl(dataset=valid,shuffle=True,
                 batch_size=batch_size)
# Evaluation order stays fixed.
test_loader=tdl(dataset=test,shuffle=False,
                batch_size=batch_size)

# Show a few validation examples (helper defined elsewhere).
display_examples(valid_loader,img_size)

# Download the letter-image H5 archive and extract it in place.
fpath='https://olgabelitskaya.github.io/'
zf='LetterColorImages_123.h5.zip'
# FIX: context managers close every handle even on error (the original
# leaked the URL/file handles on exception).
with urllib.request.urlopen(fpath+zf) as input_file:
    with open(zf,'wb') as output_file:
        output_file.write(input_file.read())
with zipfile.ZipFile(zf,'r') as zipf:
    zipf.extractall('')
# Incomplete fragment: `raws`, `N`, `images`, `labels`, `i`, and
# `timage` are defined elsewhere in the original notebook (timage is
# presumably tensorflow.image -- TODO confirm against the full source).
for img,lbl in raws.take(N):
    # Resize each raw image, then store it scaled to [0, 1].
    img=timage.resize(img,[img_size,img_size])
    images[i,:]=img.numpy()/255
    labels[i,:]=lbl; i+=1

# Split the arrays into train/valid/test via the project helper
# `prepro_display` (defined elsewhere); by its name it presumably also
# renders example images -- confirm against the helper's definition.
[[x_train,x_valid,x_test],
 [y_train,y_valid,y_test]]=\
prepro_display(images,labels,img_size)

# Standard deviation of the training pixels (quick sanity check;
# notebook cell displays the value).
x_train.std()

random_seed=12; batch_size=128
# Wrap the split arrays in TData datasets and build the three loaders.
train=TData(x_train,y_train)
valid=TData(x_valid,y_valid)
test=TData(x_test,y_test)
train_loader=tdl(dataset=train,shuffle=True,
                 batch_size=batch_size)
valid_loader=tdl(dataset=valid,shuffle=True,
                 batch_size=batch_size)
# BUG FIX: the test loader was created with shuffle=True; evaluation
# data should keep a fixed order (consistent with every other test
# loader in this file).
test_loader=tdl(dataset=test,shuffle=False,
                batch_size=batch_size)
display_examples(valid_loader,img_size)

"""## VGG19"""

class VGG19(tnn.Module):
    def __init__(self,num_classes):
        super(VGG19,self).__init__()    
        self.block1=tnn.Sequential(
            tnn.Conv2d(in_channels=3,out_channels=64,
                       kernel_size=(3,3),stride=(1,1),
                       padding=1), 
# Example #5 (stray notebook-export artifact; original lines: "示例#5" / "0")
random_seed=12; batch_size=128
# CIFAR-10: split the 50k training images 44k train / 6k validation.
train_ids=torch.arange(0,44000)
valid_ids=torch.arange(44000,50000)
# Per-channel mean/std of .5 maps pixels from [0,1] to [-1,1].
tr0=(.5,.5,.5)
trans=transforms.Compose(
    [transforms.Resize((img_size,img_size)),
     transforms.ToTensor(),
     transforms.Normalize(tr0,tr0)])
train_valid=tcifar10(root='data',train=True,
                     download=True,
                     transform=trans)
train=Subset(train_valid,train_ids)
valid=Subset(train_valid,valid_ids)
test=tcifar10(root='data',train=False,
              transform=trans)
# BUG FIX: the 'test' loader was shuffled; evaluation loaders should
# keep a fixed order (consistent with the other test loaders here).
dataloaders={'train':tdl(dataset=train,shuffle=True,
                         batch_size=batch_size),
             'valid':tdl(dataset=valid,shuffle=True,
                         batch_size=batch_size),
             'test':tdl(dataset=test,shuffle=False,
                        batch_size=batch_size)}

# Commented out IPython magic to ensure Python compatibility.
# %display_examples 1

# Download the letter-image H5 archive and open it for reading.
fpath='https://olgabelitskaya.github.io/'
zf='LetterColorImages_123.h5.zip'
# FIX: context managers close both handles even if read/write raises
# (the original leaked them on error).
with urllib.request.urlopen(fpath+zf) as input_file:
    with open(zf,'wb') as output_file:
        output_file.write(input_file.read())
zipf=zipfile.ZipFile(zf,'r')
# NOTE(review): zipf is opened but never extracted or closed in this
# fragment -- presumably handled later; confirm.
# Plot fragment: `ax`, `x_min`/`x_max`, `y_min`/`y_max`, `X_train`,
# `X_test`, `y_train`, `y_test`, and `pl` (presumably matplotlib.pylab
# -- TODO confirm) are defined elsewhere.
# Red reference line from (x_min, y_min) to (x_max, y_max) on both axes.
ax[0].plot([x_min,x_max],[y_min,y_max],c='red')
ax[1].plot([x_min,x_max],[y_min,y_max],c='red')
# Scatter the two-feature points, colored by their labels.
ax[0].scatter(X_train[:,0],X_train[:,1],
              c=y_train,s=10,cmap=pl.cm.cool)
ax[1].scatter(X_test[:,0], X_test[:,1],
              c=y_test,s=10,cmap=pl.cm.cool)
ax[0].grid(); ax[1].grid()

"""## Softmax Regression"""

# MNIST train/test loaders for the softmax-regression demo.
random_seed=23
batch_size=128
train=tmnist(root='data',train=True,download=True,
             transform=transforms.ToTensor())
test=tmnist(root='data',train=False,
            transform=transforms.ToTensor())
train_loader=tdl(dataset=train,batch_size=batch_size,shuffle=True)
test_loader=tdl(dataset=test,batch_size=batch_size,shuffle=False)
# Peek at a single batch to report tensor shapes.
images,labels=next(iter(train_loader))
print('Image dimensions: %s'%str(images.shape))
print('Label dimensions: %s'%str(labels.shape))

# Hyperparameters for softmax regression on MNIST (28x28 = 784 inputs,
# 10 digit classes).
learning_rate=.1; epochs=15
num_features=784; num_classes=10
class SoftmaxRegression(torch.nn.Module):
    def __init__(self,num_features,num_classes):
        super(SoftmaxRegression,self).__init__()
        self.linear=torch.nn.Linear(num_features,num_classes)        
        self.linear.weight.detach().zero_()
        self.linear.bias.detach().zero_()     
                print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f' 
#                        %(epoch+1,epochs,batch_ids, 
                         len(train2)//batch_size2,cost))           
        with torch.set_grad_enabled(False):
            print('Epoch: %03d/%03d train accuracy: %.2f%%'%\
                  (epoch+1,epochs,model_acc(model,train_loader2,
                                            num_features2)))

"""## Data"""

# MNIST train/test loaders (batch size 64) plus a one-batch shape report.
random_seed=1
batch_size=64
train=tmnist(root='data',train=True,download=True,
             transform=transforms.ToTensor())
test=tmnist(root='data',train=False,
            transform=transforms.ToTensor())
train_loader=tdl(dataset=train,batch_size=batch_size,shuffle=True)
test_loader=tdl(dataset=test,batch_size=batch_size,shuffle=False)
# Inspect a single batch to confirm tensor dimensions.
images,labels=next(iter(train_loader))
print('Image dimensions: %s'%str(images.shape))
print('Label dimensions: %s'%str(labels.shape))

# Download the letter-image H5 archive and open it for reading.
fpath='https://olgabelitskaya.github.io/'
zf='LetterColorImages_123.h5.zip'
# FIX: context managers close both handles even if read/write raises
# (the original leaked them on error).
with urllib.request.urlopen(fpath+zf) as input_file:
    with open(zf,'wb') as output_file:
        output_file.write(input_file.read())
zipf=zipfile.ZipFile(zf,'r')
# NOTE(review): zipf is opened but never extracted or closed in this
# fragment -- presumably handled later; confirm.
grayscale=False; img_size=120
# Train pipeline: resize to 128, then a random 120x120 crop (light
# augmentation). Eval pipeline uses a deterministic center crop.
trans=transforms.Compose(
    [transforms.Resize((128,128)),
     transforms.RandomCrop((img_size,img_size)),
     transforms.ToTensor()])
trans2=transforms.Compose(
    [transforms.Resize((128,128)),
     transforms.CenterCrop((img_size,img_size)),
     transforms.ToTensor()])
# AFADAgeData, train_csv/test_csv, img_path, and num_workers are
# defined elsewhere in the original notebook.
train=AFADAgeData(csv_path=train_csv,
                  img_dir=img_path,
                  transform=trans)
test=AFADAgeData(csv_path=test_csv,
                 img_dir=img_path,
                 transform=trans2)
# BUG FIX: the test loader was shuffled; evaluation should use a
# fixed order (consistent with the file's other test loaders).
dataloaders={'train':tdl(dataset=train,batch_size=batch_size,
                         shuffle=True,num_workers=num_workers),
             'test':tdl(dataset=test,batch_size=batch_size,
                        shuffle=False,num_workers=num_workers)}

# Commented out IPython magic to ensure Python compatibility.
# %display_examples test

# Render a section heading in the notebook (dhtml defined elsewhere).
dhtml('Ordinal Regression CNN')

def conv31(in_planes,out_planes,stride=1):
    """Return a bias-free 3x3 convolution with padding 1.

    Spatial size is preserved when stride is 1.
    """
    return tnn.Conv2d(
        in_planes,out_planes,kernel_size=3,
        stride=stride,padding=1,bias=False)
def cost_fit(targets,predictions):
    """Mean squared error between float-cast targets and predictions."""
    diff=targets.float()-predictions
    return (diff*diff).mean()
def mae_mse(model,data_loader):
# Fragment: preprocess images loaded from the H5 file. `timage`, `f`,
# `keys`, `x`, and `img_size` are defined elsewhere (timage is
# presumably tensorflow.image -- TODO confirm).
x = timage.resize(x, [img_size, img_size])
# NOTE(review): this is a reshape, not a transpose, into
# (N, 3, img_size, img_size) -- if the source layout is HWC a
# transpose/permute would be required instead; confirm upstream layout.
x = x.numpy().reshape(-1, 3, img_size, img_size)
# Scale pixel values to [0, 1].
x = x / 255
print(x.mean(), x.std())
# Labels in the file are 1-based; shift to 0-based class indices.
y = np.array(f[keys[2]], dtype='int32') - 1
N = len(y)
n = int(.1 * N)
# Deterministic shuffle (seed 23), then keep exactly 110 batches of 128.
shuffle_ids = np.arange(N)
np.random.RandomState(23).shuffle(shuffle_ids)
x, y = x[shuffle_ids][:110 * 128], y[shuffle_ids][:110 * 128]

# Single-loader dict over the letter-image dataset (train only;
# a GAN needs no validation/test split).
random_seed = 23
batch_size = 128
train = TData(x, y)
dataloaders = {'train': tdl(dataset=train, batch_size=batch_size,
                            shuffle=True)}

# Commented out IPython magic to ensure Python compatibility.
# %display_examples train

# Render a section heading in the notebook (dhtml defined elsewhere).
dhtml('DCGAN')


def weights_init(module):
    """DCGAN-style weight initializer.

    Conv layers get N(0, 0.02) weights; batch-norm layers get
    N(1, 0.02) weights and zero bias. Other modules are untouched.
    """
    name = type(module).__name__
    if 'Conv' in name:
        tnn.init.normal_(module.weight.data, 0., .02)
    elif 'BatchNorm' in name:
        tnn.init.normal_(module.weight.data, 1., .02)
        tnn.init.constant_(module.bias.data, 0)