def ResSENet101(num_classes=10):
    """Build a 101-layer SE-ResNet.

    Depth bookkeeping: 1 initial conv
    + (3 + 4 + 23 + 3) stages of three convolutions each (= 99 convs)
    + 1 final fully-connected layer = 101 layers total.
    """
    stage_sizes = [3, 4, 23, 3]
    return ResNet(ResidualSEBasicBlock, stage_sizes, num_classes)
def ResSENet50(num_classes=10):
    """Build a 50-layer SE-ResNet.

    Depth bookkeeping: 1 initial conv
    + (3 + 4 + 6 + 3) stages of three convolutions each (= 48 convs)
    + 1 final fully-connected layer = 50 layers total.
    """
    stage_sizes = [3, 4, 6, 3]
    return ResNet(ResidualSEBasicBlock, stage_sizes, num_classes)
def ResSENet152(num_classes=10):
    """Build a 152-layer SE-ResNet.

    Depth bookkeeping: 1 initial conv
    + (3 + 8 + 36 + 3) stages of three convolutions each (= 150 convs)
    + 1 final fully-connected layer = 152 layers total.
    """
    stage_sizes = [3, 8, 36, 3]
    return ResNet(ResidualSEBasicBlock, stage_sizes, num_classes)
def ResNet18(num_classes=10):
    """Build an 18-layer ResNet.

    Depth bookkeeping: 1 initial conv
    + four stages of two BasicBlocks, two convolutions per block (= 16 convs)
    + 1 final fully-connected layer = 18 layers total.
    """
    stage_sizes = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_sizes, num_classes)
def ResNet34(num_classes=10):
    """Build a 34-layer ResNet.

    Depth bookkeeping: 1 initial conv
    + (3 + 4 + 6 + 3) stages of two convolutions each (= 32 convs)
    + 1 final fully-connected layer = 34 layers total.

    `num_classes` now defaults to 10 for consistency with every other
    factory in this file (ResNet18, ResSENet18/50/101/152); existing
    callers that pass the argument are unaffected.
    """
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
    # NOTE(review): orphaned fragment — the enclosing `def` (evidently a
    # test/evaluation function: `model`, `loader`, `correct`, `epochs`,
    # `accuracy` are all unbound here) was lost during extraction, and this
    # code sits unreachably after ResNet34's `return`. Kept byte-identical.
    with torch.no_grad():  # no gradients needed while scoring
        for i, j in loader:
            # Move the batch to the (externally defined) device.
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            # `accuracy` presumably returns the count of correct predictions
            # in the batch — TODO confirm against its definition.
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100


# Experiment setup: ResNet-18 trained with the SwatsLocal optimizer.
dtype = torch.cuda.FloatTensor  # NOTE(review): requires CUDA; fails on CPU-only hosts
torch.manual_seed(52)  # fixed seed for reproducible initialization
net = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)  # ResNet-18 topology
# Parameter names passed to the optimizer (was a manual append loop;
# replaced with the equivalent comprehension).
named_layers = [name for name, _ in net.named_parameters()]
opt = SwatsLocal(net.parameters(), named_layers, lr=0.001)
loss = nn.CrossEntropyLoss().type(dtype)


def adjust_lr(opt, epochs):
    """Step-decay schedule: set the optimizer's per-group 'div_lr_decay'
    option to 10 from epoch 75 and to 100 from epoch 150.

    opt: optimizer whose param_groups accept a 'div_lr_decay' key.
    epochs: current epoch number.

    Before epoch 75 the groups are left untouched (same as the original).
    Removed the unused `base_lr` local and the redundant second pass over
    param_groups (the original ran both loops when epochs >= 150; only the
    later value survived).
    """
    if epochs >= 150:
        divisor = 100
    elif epochs >= 75:
        divisor = 10
    else:
        return
    for group in opt.param_groups:
        group['div_lr_decay'] = divisor
def ResSENet18(num_classes=10):
    """Build an 18-layer SE-ResNet.

    Docstring added for consistency with the other factory functions in
    this file: 1 initial conv + four stages of two blocks with two
    convolutions each (= 16 convs) + 1 final FC layer = 18 layers.
    """
    return ResNet(ResidualSEBasicBlock, [2, 2, 2, 2], num_classes)
# Example #8 (scrape-artifact separator between snippets; commented out —
# the bare `Example` token was a NameError at import time)
def Resnet101(num_classes=1):
    """101-layer ResNet assembled from standard Bottleneck blocks."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
# Example #9 (scrape-artifact separator between snippets; commented out)
def CBAMResnet101(num_classes=1):
    """101-layer ResNet built from CBAM bottleneck blocks.

    The average-pool layer is replaced with an adaptive 1x1 pool so the
    head works for arbitrary input spatial sizes.
    """
    net = ResNet(CBAMBottleneck, [3, 4, 23, 3], num_classes=num_classes)
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
# Example #10 (scrape-artifact separator between snippets; commented out)
def SEResnet50(num_classes=1):
    """50-layer ResNet built from Squeeze-and-Excitation bottleneck blocks.

    The average-pool layer is replaced with an adaptive 1x1 pool so the
    head works for arbitrary input spatial sizes.
    """
    net = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes)
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
    # NOTE(review): orphaned fragment — the enclosing `def` (evidently a
    # test/evaluation function: `model`, `loader`, `correct`, `epochs`,
    # `accuracy` are all unbound here) was lost during extraction, and this
    # code sits unreachably after SEResnet50's `return`. Kept byte-identical.
    with torch.no_grad():  # no gradients needed while scoring
        for i, j in loader:
            # Move the batch to the (externally defined) device.
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            # `accuracy` presumably returns the count of correct predictions
            # in the batch — TODO confirm against its definition.
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100


# Experiment setup: ResNet-18 trained with the SwatsVanillaGlobal optimizer.
dtype = torch.cuda.FloatTensor  # CUDA float tensor type (GPU required)
torch.manual_seed(52)           # reproducible initialization
# ResNet-18 configuration: four stages of two BasicBlocks each.
net = ResNet(BasicBlock, [2, 2, 2, 2])
net = net.to(device)
opt = SwatsVanillaGlobal(net.parameters(), lr=0.001)
loss = nn.CrossEntropyLoss().type(dtype)


def adjust_lr(opt, epochs):
    """Set the optimizer's per-group 'lr_decay' option to 10 once training
    reaches epoch 100 (a one-step tenfold LR decay).

    opt: optimizer whose param_groups accept an 'lr_decay' key.
    epochs: current epoch number.

    Removed the unused `base_lr` local; behavior is otherwise unchanged.
    """
    if epochs >= 100:
        for group in opt.param_groups:
            group['lr_decay'] = 10


# Training configuration and per-epoch history buffers.
total_epochs = 200
train_loss, train_acc, test_acc = [], [], []
    # NOTE(review): orphaned fragment — this indented block sits at module
    # level (a syntax error as-is); the enclosing evaluation `def` that bound
    # `model`, `loader`, `correct`, `epochs`, and `accuracy` was lost during
    # extraction. Kept byte-identical.
    with torch.no_grad():  # no gradients needed while scoring
        for i, j in loader:
            # Move the batch to the (externally defined) device.
            inputs, labels = i.to(device), j.to(device)
            outputs = model(inputs)
            # `accuracy` presumably returns the count of correct predictions
            # in the batch — TODO confirm against its definition.
            correct = correct + accuracy(outputs, labels)
        print("[%d/%d] Test Accuracy : %f" %
              (epochs, total_epochs, (correct / len(loader.dataset)) * 100))
        print(
            '---------------------------------------------------------------------'
        )
    return (correct / len(loader.dataset)) * 100


# Experiment setup: ResNet-18 trained with AdaBound plus a StepLR schedule.
dtype = torch.cuda.FloatTensor  # CUDA float tensor type (GPU required)
torch.manual_seed(52)           # reproducible initialization
# ResNet-18 configuration: four stages of two BasicBlocks each.
net = ResNet(BasicBlock, [2, 2, 2, 2])
net = net.to(device)
opt = AdaBound(net.parameters(), lr=0.001, final_lr=0.1)
loss = nn.CrossEntropyLoss().type(dtype)
# Decay the learning rate tenfold every 100 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=100, gamma=0.1)

# Training configuration and per-epoch history buffers.
total_epochs = 200
train_loss = []
train_acc = []
test_acc = []
# Train and evaluate once per epoch, record loss/accuracy histories, then
# advance the LR scheduler.
for epoch in range(1, total_epochs + 1):
    epoch_loss, epoch_acc = train(net, epoch, trainloader)
    eval_acc = test(net, epoch, testloader)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_acc.append(eval_acc)
    scheduler.step()
# Example #13 (scrape-artifact separator between snippets; commented out)
from Resnet import ResNet


# Constants / settings for the Bengali grapheme experiment.
SEED = 222
BATCH_SIZE = 64  # NOTE(review): defined but not used in this visible fragment
HEIGHT = 137     # source image height in the parquet data
WIDTH = 236      # source image width in the parquet data
device = "cuda" if torch.cuda.is_available() else "cpu"

# Call seed to setup for repeatable results
seed_everything(SEED)

# Create Model
model = ResNet().to(device)

# Setup data files to load
data_dir = '../data'
# NOTE(review): `files_train` is built but never used below — the loop
# re-derives the same paths with an f-string instead.
files_train = [f'train_image_data_{fid}.parquet' for fid in range(4)]

# Predict Test values
# NOTE(review): model is put in eval mode despite the "training" comment in
# the loop below — confirm which was intended.
model.eval()
row_id = []
target = []
dataset = BenDataset(HEIGHT, WIDTH)

# Load each of the four training parquet shards in turn.
for i in range(4):
    # Do training here
    parquet_df = pd.read_parquet(F'../data/train_image_data_{i}.parquet',
                                  engine='pyarrow')
from BengaliDataset import BengaliDataset
from Resnet import ResNet
from Utils import seed_everything

# Constants / settings for generating the test-set submission.
SEED = 222
BATCH_SIZE = 64  # NOTE(review): defined but not used in this visible fragment
HEIGHT = 137     # source image height in the parquet data
WIDTH = 236      # source image width in the parquet data
device = "cuda" if torch.cuda.is_available() else "cpu"

seed_everything(SEED)

# Create Model and restore trained weights from the saved checkpoint.
model = ResNet().to(device)
model_path = '../input/resnet18/resnet_saved_weights.pth'

# load model from dict to create test output csv
model.load_state_dict(torch.load(model_path))

# Get Parquet files for Testing
data_dir = '../input/bengaliai-cv19'
files_test = [f'test_image_data_{fid}.parquet' for fid in range(4)]

# Predict Test values
model.eval()
row_id = []
target = []

# NOTE(review): the body of this loop was lost during extraction — as
# written the `for` has no suite and the file will not parse past here.
for fname in files_test:
# Example #15 (scrape-artifact separator between snippets; commented out)
 # NOTE(review): orphaned method fragment — the enclosing class header was
 # lost during extraction (the same method appears intact in the Retrieval
 # class later in this file). Kept byte-identical, odd 1-space indent and all.
 def __init__(self):
     """Build the model: a ResNet feature head plus TF1 graph placeholders
     for a batch of 400x400 RGB images and one scalar label per image."""
     self.resnet_head = ResNet()
     self.input = tf.placeholder(tf.float32, shape=(None, 400, 400, 3))
     self.labels = tf.placeholder(tf.float32, shape=(None, ))
     # Output tensor: the head applied to the input placeholder.
     self.output = self.forward(self.input)
     self.lr = 0.0001  # learning rate used by the optimizer in fit()
# Example #16 (scrape-artifact separator between snippets; commented out)
class Retrieval(object):
    """Image-retrieval trainer (TensorFlow 1.x graph mode).

    A ResNet feature head produces L2-normalized embeddings, which are
    trained with a contrastive loss over pairs built from each batch.
    """

    def __init__(self):
        # Feature extractor; its layers are applied one by one in forward().
        self.resnet_head = ResNet()
        # Graph inputs: a batch of 400x400 RGB images and scalar labels.
        self.input = tf.placeholder(tf.float32, shape=(None, 400, 400, 3))
        self.labels = tf.placeholder(tf.float32, shape=(None, ))
        self.output = self.forward(self.input)
        self.lr = 0.0001  # Adam learning rate used in fit()

    def forward(self, X):
        """Run X through every head layer, then L2-normalize each row so the
        embeddings lie on the unit hypersphere."""
        for layer in self.resnet_head.layers:
            X = layer.forward(X)
        X = tf.nn.l2_normalize(X, axis=1)
        return X

    def fit(self, batch_size, traingen, valgen, contrastive_obj, keras_model):
        """Train the head with a contrastive loss and save model + loss plot.

        batch_size: samples per step; also fixes n_batches = N // batch_size.
        traingen / valgen: generators yielding (X, Y, name) batches.
        contrastive_obj: provides pair_combos / get_binaray_labels /
            contrastive_loss used to assemble the loss graph.
        keras_model: pretrained Keras model whose layer weights are copied
            into the head before training.
        """
        # Dataset size = number of augmented image files on disk.
        N = len(os.listdir("../Data/Augmented_data/Images/"))

        # Initialize the head's variables inside Keras' own session so both
        # frameworks share one graph/session.
        init = tf.variables_initializer(self.resnet_head.get_params())
        session = keras.backend.get_session()
        self.resnet_head.set_session(session)
        session.run(init)
        self.session = session
        # Copy pretrained weights from the Keras layers into the head.
        self.resnet_head.copyFromKerasLayers(keras_model.layers)

        # Register the embedding tensor so it can be recovered after
        # restoring the saved graph.
        embedding = self.forward(self.input)
        tf.add_to_collection("embedding", embedding)
        saver = tf.train.Saver()

        # Contrastive-loss margin.
        margin = 0.4

        # Build all pair combinations within the batch and their binary
        # same/different labels, then the loss itself.
        anchor_left, anchor_right = contrastive_obj.pair_combos(embedding)
        labels = contrastive_obj.get_binaray_labels(self.labels)
        first_part, second_part, cost = contrastive_obj.contrastive_loss(
            labels, anchor_left, anchor_right, margin)

        optimizer = tf.train.AdamOptimizer(self.lr)
        trainin_op = optimizer.minimize(cost)  # current best 0.0001
        epoch = 50
        n_batches = N // batch_size

        # Loss histories, sampled every 100 batches.
        LL_train = []
        LL_val = []
        #self.session.run(tf.variables_initializer(optimizer.variables())) # Gives a weird cost on batches
        self.session.run(tf.global_variables_initializer())
        for i in range(epoch):
            for j in range(n_batches):
                X, Y, _ = next(traingen)
                self.session.run(trainin_op,
                                 feed_dict={
                                     self.input: X,
                                     self.labels: Y
                                 })

                # Every 100 batches, log train loss on the current batch and
                # validation loss on a fresh validation batch.
                if j % 100 == 0:
                    loss_train = self.session.run(cost,
                                                  feed_dict={
                                                      self.input: X,
                                                      self.labels: Y
                                                  })
                    Xval, Yval, val_name = next(valgen)
                    loss_val = self.session.run(cost,
                                                feed_dict={
                                                    self.input: Xval,
                                                    self.labels: Yval
                                                })
                    LL_val.append(loss_val)
                    LL_train.append(loss_train)
                    print(
                        " epoch %d of %d iteration %d of %d , val_loss is %.6f"
                        % (i, epoch - 1, j, n_batches, loss_val))
                    print(
                        " epoch %d of %d iteration %d of %d , train_loss is %.6f"
                        % (i, epoch - 1, j, n_batches, loss_train))
        # Persist the trained graph/weights.
        if not os.path.exists("./Models/Resnet50-plain"):
            os.makedirs("./Models/Resnet50-plain")
        saver.save(self.session, "./Models/Resnet50-plain/Resnet50-plain")

        # Plot and save the sampled train/validation loss curves.
        fig = plt.figure("Resnet trained from scratch")
        plt.plot(LL_train, label="train_cost")
        plt.plot(LL_val, label="val_cost")
        plt.legend()
        if not os.path.exists("./Plots"):
            os.makedirs("./Plots")
        fig.savefig("./Plots/resnet_train",
                    transparent=True,
                    bbox_inches="tight",
                    pad_inches=0)