Example #1
def main(argv=None):

    #Building the arg reader that gets arguments from the config file and the command line
    argreader = ArgReader(argv)

    argreader.parser.add_argument(
        '--noise',
        type=float,
        default=0.1,
        metavar='NOISE',
        help=
        'the amount of noise to add to the gradient of the clustNet (as a percentage) (default: 0.1)'
    )
    argreader.parser.add_argument(
        '--entweig',
        type=float,
        default=0,
        metavar='ENTWEI',
        help=
        'the weight of the clusters entropy term in the cost function (default: 0)'
    )
    argreader.parser.add_argument(
        '--clustdivers',
        type=float,
        default=0,
        metavar='CLUSTDIV',
        help=
        'the weight of the clusters diversity term in the cost function (default: 0)'
    )
    argreader.parser.add_argument(
        '--filter_dis',
        type=float,
        default=0,
        metavar='FILDIS',
        help=
        'the weight of the filter distance term in the cost function (default: 0)'
    )

    argreader.parser.add_argument(
        '--featmap_entr',
        type=float,
        default=0,
        metavar='FEATENT',
        help=
        'the weight of the feature map entropy term in the cost function (default: 0)'
    )
    argreader.parser.add_argument(
        '--featmap_var',
        type=float,
        default=0,
        metavar='FEATVAR',
        help=
        'the weight of the feature map variance term in the cost function (default: 0)'
    )

    argreader.parser.add_argument(
        '--optim',
        type=str,
        default="SGD",
        metavar='OPTIM',
        help='the optimizer algorithm to use (default: \'SGD\')')
    argreader.parser.add_argument(
        '--noise_init',
        type=float,
        default=0,
        metavar='NOISEINIT',
        help='The percentage of noise to add (relative to the filter norm) when '
             'initializing detectNets with a pre-trained detectNet')

    argreader.parser.add_argument(
        '--reverse_target',
        type=str2bool,
        default="False",
        help='To swap the positive and the negative class. Useful to train a detectNet '
             'which will later be used to produce negative feature maps'
    )

    argreader.parser.add_argument(
        '--clu_train_mode',
        type=str,
        default='joint',
        metavar='TRAINMODE',
        help=
        'Determines the cluster training mode. Can be \'joint\' or \'separated\' (default: \'joint\')'
    )

    argreader.parser.add_argument('--rand_prop_val_sched',
                                  type=float,
                                  nargs='+',
                                  default=[0.9, 0.5, 0.1],
                                  metavar='RANDPROP_VAL_SCHED',
                                  help='the successive values of the rand_prop schedule (applied at the epochs given by --rand_prop_epo_sched)')
    argreader.parser.add_argument('--rand_prop_epo_sched',
                                  type=int,
                                  nargs='+',
                                  default=[0, 1, 2],
                                  metavar='RANDPROP_EPO_SCHED',
                                  help='the epochs at which each value of --rand_prop_val_sched takes effect')

    argreader.parser.add_argument(
        '--init',
        type=str,
        default=None,
        metavar='N',
        help='the weights to use to initialize the detectNets')
    argreader.parser.add_argument(
        '--init_clu',
        type=str,
        default=None,
        metavar='N',
        help='the weights to use to initialize the clustNets')
    argreader.parser.add_argument(
        '--init_enc',
        type=str,
        default=None,
        metavar='N',
        help='the weights to use to initialize the encoder net')
    argreader.parser.add_argument(
        '--init_pos',
        type=str,
        default=None,
        metavar='N',
        help=
        'the weights to use to initialize the positive detectNets. Ignored when not training a full clust detect net'
    )
    argreader.parser.add_argument(
        '--init_neg',
        type=str,
        default=None,
        metavar='N',
        help=
        'the weights to use to initialize the negative detectNets. Ignored when not training a full clust detect net'
    )

    argreader.parser.add_argument(
        '--encapplyDropout2D',
        default=True,
        type=str2bool,
        metavar='N',
        help='whether or not to apply 2D dropout in the preprocessing net')

    #Reading the command line args
    argreader.getRemainingArgs()

    #Getting the args from command line and config file
    args = argreader.args

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    if args.clust < 2:
        raise ValueError(
            "The number of cluster must be at least 2. Got {}".format(
                args.clust))
    train_loader, test_loader = dataLoader.loadData(args.dataset,
                                                    args.batch_size,
                                                    args.test_batch_size,
                                                    args.cuda,
                                                    args.num_workers)

    #The group of class to detect
    np.random.seed(args.seed)
    classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    np.random.shuffle(classes)
    classToFind = classes[0:args.clust]
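    #e.g. with args.clust = 3, classToFind could be a 3-class subset like [3, 8, 0] (it depends on the seed)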

    #The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))
    if not (os.path.exists("../results/{}".format(args.exp_id))):
        os.makedirs("../results/{}".format(args.exp_id))
    if not (os.path.exists("../nets/{}".format(args.exp_id))):
        os.makedirs("../nets/{}".format(args.exp_id))

    if args.pretrain:
        netType = "detectNet"
    elif args.pretrain_cae:
        netType = "cae"
    else:
        netType = "clustDetectNet"

    #Write the arguments in a config file so the experiment can be re-run
    argreader.writeConfigFile("../nets/{}/{}{}.ini".format(
        args.exp_id, netType, args.ind_id))

    #Building the net
    net = netBuilder.netMaker(args)

    if args.cuda:
        net.cuda()

    startEpoch = initialize_Net_And_EpochNumber(net,args.pretrain,args.init,args.init_clu,args.init_enc,args.init_pos,args.init_neg,\
                                                args.exp_id,args.ind_id,args.cuda,args.noise_init,netType)

    net.classToFind = classToFind

    #Getting the constructor and the kwargs for the chosen optimizer
    optimConst, kwargs = get_OptimConstructor_And_Kwargs(
        args.optim, args.momentum)

    #If no learning rate schedule is indicated (i.e. there is only one learning rate),
    #the args.lr argument will be a float and not a list of floats.
    #Converting it to a one-element list makes the rest of the processing easier
    if type(args.lr) is float:
        args.lr = [args.lr]

    if type(args.lr_cl) is float:
        args.lr_cl = [args.lr_cl]
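    #e.g. a single learning rate 0.01 becomes the schedule [0.01], while a list like [0.01, 0.001] is left unchanged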

    if (not args.pretrain) and (not args.pretrain_cae):

        #Adding a hook to add noise at every weight update
        if args.noise != 0:
            gradNoise = GradNoise(ampl=args.noise)
            for p in net.getClustWeights():
                p.register_hook(gradNoise)

        #Train and evaluate the clustering detecting network for several epochs
        lrCounter = 0

        for epoch in range(startEpoch, args.epochs + 1):

            #This condition determines when the learning rate should be updated (to follow the learning rate schedule)
            #The optimizers have to be rebuilt every time the learning rate is updated
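            #e.g. with args.epochs = 30 and args.lr = [0.01, 0.001, 0.0001], (30+1)//3 = 10,
            #so the optimizers are rebuilt with the next learning rate at epochs 1, 11 and 21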
            if (epoch - 1) % (
                (args.epochs + 1) // len(args.lr)) == 0 or epoch == startEpoch:

                kwargs['lr'] = args.lr[lrCounter]
                print("Learning rate : ", kwargs['lr'])
                optimizerDe = optimConst(net.getDetectWeights(), **kwargs)

                kwargs['lr'] = args.lr_cl[lrCounter]
                print("Learning rate of clustNet: ", kwargs['lr'])
                optimizerCl = optimConst(net.getClustWeights(), **kwargs)

                if lrCounter < len(args.lr) - 1:
                    lrCounter += 1

            train(net, optimizerCl, optimizerDe, train_loader, epoch, args,
                  classToFind)
            test(net, test_loader, epoch, args, classToFind)

    else:
        print("Pretraining")

        if args.pretrain_cae:
            trainFunc = trainCAE
            testFunc = testCAE
            kwargsFunc = {}
        else:
            trainFunc = trainDetect
            testFunc = testDetect
            kwargsFunc = {"classToFind": classToFind}

        #Train and evaluate the detecting network for several epochs
        lrCounter = 0
        for epoch in range(startEpoch, args.epochs + 1):

            if (epoch - 1) % (
                (args.epochs + 1) // len(args.lr)) == 0 or epoch == startEpoch:

                kwargs['lr'] = args.lr[lrCounter]
                print("Learning rate : ", kwargs['lr'])
                optimizerDe = optimConst(net.parameters(), **kwargs)

                if lrCounter < len(args.lr) - 1:
                    lrCounter += 1

            trainFunc(net, optimizerDe, train_loader, epoch, args,
                      **kwargsFunc)
            testFunc(net, test_loader, epoch, args, **kwargsFunc)
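
# Hypothetical invocation (the script name and the flags not defined above,
# such as --lr and --epochs, are assumed to be registered by ArgReader):
#   python train.py --exp_id myExp --ind_id 1 --clust 4 --epochs 30 --lr 0.01 0.001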
Example #2
def featureMapVariance(args):

    torch.manual_seed(args.seed)

    _,test_loader = dataLoader.loadData(dataset=args.dataset,batch_size=args.batch_size,test_batch_size=args.test_batch_size,cuda=False)

    netId = args.feat_map_var[0]
    layNb = args.feat_map_var[1]

    #Get and sort the experiment file
    weigFiles = sortExperiFiles("../nets/"+args.exp_id+"/clustDetectNet"+str(netId)+"_epoch*",netNumber=1)
    paramDictPath = "../nets/"+str(args.exp_id)+"/clustDetectNet"+str(netId)+".ini"

    #Getting the dataset and the boolean parameter inweig,
    #assuming that all the nets in the experiment use the same dataset
    #and the same value for the boolean parameter inweig
    config = configparser.ConfigParser()
    config.read(paramDictPath)
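    #The ini file written at training time presumably looks like:
    #  [default]
    #  dataset = MNIST
    #  clust = 4
    #  lr = [0.01, 0.001]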

    batch_nb = len(test_loader.dataset)//args.test_batch_size

    #Updating args with the arguments from the config file
    argsDict = vars(args)
    for key in config['default']:

        if key in argsDict:
            if argsDict[key] is not None:
                cast_f = type(argsDict[key])

                if cast_f is bool:
                    cast_f = lambda x: x == "True"

                if config['default'][key][0] == "[" :
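                    #e.g. the string "[0.9, 0.5, 0.1]" is split and cast element-wise to [0.9, 0.5, 0.1]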
                    values = config['default'][key].replace("[","").replace("]","").split(" ")
                    argsDict[key] = [cast_f(value.replace(",","")) for value in values]
                else:
                    argsDict[key] = cast_f(config['default'][key])

    args = Bunch(argsDict)

    net = netBuilder.netMaker(args)
    net.eval()

    imgCounter = 0

    #Getting the size of the feature maps at the desired layer
    img = test_loader.dataset[0][0]
    imgSize = net(img[None,:,:,:])[1][-3].size(-1)

    plt.figure()
    epoch_count = 0
    colors = cm.rainbow(np.linspace(0, 1, args.clust))

    for weigFile in weigFiles[0]:
        epoch_count += 1
        print("Epoch",epoch_count)

        net.load_state_dict(torch.load(weigFile))

        batch_count = 1
        outputComputed = False

        for i in range(args.clust):
            #Truncating the temporary csv files before appending to them
            open("feature_map_{}_pos_tmp.csv".format(i),'w').close()
            open("feature_map_{}_neg_tmp.csv".format(i),'w').close()

        clusDisSum = torch.zeros(args.clust)

        for data, origTarget in test_loader:

            target = mnist.merge(origTarget)

            if batch_count % (batch_nb//10) == 0:
                print("\tbatch",batch_count,"on",batch_nb)
            batch_count += 1

            _,actArr = net(data)

            act = actArr[-3].view(args.clust,-1,imgSize,imgSize)
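            #act[i] now holds the feature maps produced for cluster i over the whole batch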

            for i in range(len(act)):

                mapsPos = mnist.masked_index(act[i],0,(target != 0).long())
                mapsNeg = mnist.masked_index(act[i],0,((1-target) != 0).long())

                if mapsPos.size(0) != 0:
                    nonEmptyPos = mnist.masked_index(mapsPos,0,(mapsPos.sum(dim=1).sum(dim=1) != 0).long())
                    writeMap(nonEmptyPos,"feature_map_{}_pos_tmp.csv".format(i))

                if mapsNeg.size(0) != 0:
                    nonEmptyNeg = mnist.masked_index(mapsNeg,0,(mapsNeg.sum(dim=1).sum(dim=1) != 0).long())
                    writeMap(nonEmptyNeg,"feature_map_{}_neg_tmp.csv".format(i))

        plotVariance("pos",args.clust,epoch_count,colors,netId,layNb,args.exp_id)
        plotVariance("neg",args.clust,epoch_count,colors,netId,layNb,args.exp_id)
Example #3
def weigthTrajectoryLength(args):

    #Count the number of net in the experiment
    netNumber = len(glob.glob("../nets/{}/*.ini".format(args.exp_id)))

    #Get and sort the experiment file
    weigFiles = sortExperiFiles("../nets/"+args.exp_id+"/clustDetectNet*_epoch*",netNumber)
    paramDictPaths = sorted(glob.glob("../nets/"+str(args.exp_id)+"/*.ini"))

    #Getting the dataset and the boolean parameter inweig,
    #assuming that all the nets in the experiment use the same dataset
    #and the same value for the boolean parameter inweig
    config = configparser.ConfigParser()
    config.read(paramDictPaths[0])
    dataset = config['default']["dataset"]
    inweig = config['default']["inweig"]
    clust = int(config['default']["clust"])

    for i in range(len(weigFiles)):

        net = netBuilder.netMaker(args)

        #Getting the parameters the net had just after initialization.
        #The initial parameters are used as a reference to measure how much
        #the weights have evolved
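        #For each layer k and epoch t, the plotted quantity is the L2 norm of the
        #relative weight change, || (w_t - w_{t-1}) / w_0 ||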
        net.load_state_dict(torch.load(weigFiles[i,0]))

        firstParams = np.array(list(net.parameters()))
        for j in range(len(firstParams)):
            firstParams[j] = np.array(firstParams[j].detach().numpy())

        #Initializing the old parameters, i.e. the parameters of the previous epoch
        distArr = np.zeros((len(weigFiles[i]),len(firstParams)))

        oldParams = np.array(list(net.parameters()))
        for j in range(len(oldParams)):
            oldParams[j] = np.array(oldParams[j].detach().numpy())

        for j in range(1,len(weigFiles[i])):
            #Getting the parameters for the current epoch
            net.load_state_dict(torch.load(weigFiles[i,j]))

            newParams = np.array(list(net.parameters()))
            for k in range(len(newParams)):
                newParams[k] = np.array(newParams[k].detach().numpy())

            #Computing the relative difference between the previous weights and the current weights
            diffArr = (newParams-oldParams)/firstParams

            #Computing the distance between the preceding weights and the current weights
            for k in range(len(diffArr)):
                distArr[j-1,k] = np.sqrt(np.power(diffArr[k],2).sum())

            #Updating the old parameters
            oldParams = np.array(list(net.parameters()))
            for k in range(len(oldParams)):
                oldParams[k] = np.array(oldParams[k].detach().numpy())

        #Plotting the distance travelled by the weights of each module across epochs
        plot = plt.figure()
        ax1 = plot.add_subplot(111)
        plt.xlabel('Epoch')
        plt.ylabel('Distance')
        ax1.plot(distArr[:,0], 'yellow', label="Clust conv1")
        ax1.plot(distArr[:,2], 'orange', label="Clust conv2")
        ax1.plot(distArr[:,4], 'r', label="Clust softmax")
        ax1.plot(distArr[:,6], 'cyan', label="Detect conv1")
        ax1.plot(distArr[:,8], 'blue', label="Detect conv2")
        ax1.plot(distArr[:,10], 'm', label="Detect softmax")

        plot.legend()
        plt.grid()
        plot.tight_layout()
        plt.savefig('../vis/{}/net{}_weightsDistances.png'.format(args.exp_id,i))
Example #4
def activationSparsity(args):

    #Count the number of net in the experiment
    netNumber = len(glob.glob("../nets/{}/*.ini".format(args.exp_id)))

    #Get and sort the experiment file
    weigFiles = sortExperiFiles("../nets/"+args.exp_id+"/clustDetectNet*_epoch*",netNumber)
    paramDictPaths = sorted(glob.glob("../nets/"+str(args.exp_id)+"/*.ini"))

    config = configparser.ConfigParser()

    _,test_loader = dataLoader.loadData(args.dataset,args.batch_size,args.test_batch_size)

    #Assuming that all the nets in the experiment use the same dataset
    #and the same value for the boolean parameter inweig
    config.read(paramDictPaths[0])
    dataset = config['default']["dataset"]
    inweig = (config['default']["inweig"] == 'True')
    clust = int(config['default']["clust"])

    #Plotting the sparsity across epochs and nets
    plotHist = plt.figure(1,figsize=(8,5))
    ax1 = plotHist.add_subplot(111)
    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.7, box.height])

    plt.xlabel('Epoch')
    plt.ylabel('Sparsity')
    handlesInp = []
    handlesConv1 = []
    handlesConv2 = []

    colors = cm.rainbow(np.linspace(0, 1, len(weigFiles)))

    for i in range(len(weigFiles)):

        print("Net",i)
        #Reading general parameters
        config.read(paramDictPaths[i])
        paramDict = config['default']

        #Check if the net parameters are in the config file;
        #if they are not, use the default ones
        if not 'biasclu' in config['default']:
            config.read("clust.config")

        config['default']["runCuda"] = str(args.cuda)

        paramNamespace = Bunch(config['default'])

        net = netBuilder.netMaker(paramNamespace)
        net.eval()

        sparsInpMean = np.empty((len(weigFiles[0])))
        sparsConv1Mean = np.empty((len(weigFiles[0])))
        sparsConv2Mean = np.empty((len(weigFiles[0])))

        for j in range(len(weigFiles[0])):

            net.load_state_dict(torch.load(weigFiles[i,j]))

            sparsInpMean[j] = 0
            sparsConv1Mean[j] = 0
            sparsConv2Mean[j] = 0

            for data, origTarget in test_loader:

                output,actArr = net(data)
                cluDis = net.cluDis
                clusts = actArr[2]
                maps = actArr[-3]
                summed_maps = actArr[-2]

                sparsInpMean[j] += computeSparsity(actArr[3]).mean()*len(data)/len(test_loader.dataset)
                sparsConv1Mean[j] += computeSparsity(actArr[4]).mean()*len(data)/len(test_loader.dataset)
                sparsConv2Mean[j] += computeSparsity(actArr[5]).mean()*len(data)/len(test_loader.dataset)
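                #Each batch is weighted by len(data)/len(test_loader.dataset), so the
                #sums above accumulate the mean sparsity over the whole test set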

        label = ','.join(str(param)+"="+str(paramDict[param]) for param in args.spar)

        handlesInp += ax1.plot(sparsInpMean, label=label,color=colors[i])
        handlesConv1 += ax1.plot(sparsConv1Mean, label=label,color=colors[i], dashes = [6,2])
        handlesConv2 += ax1.plot(sparsConv2Mean, label=label,color=colors[i], dashes = [2,2])

        ax1.set_ylim([0, 1])

        legInp = plotHist.legend(handles=handlesInp, loc='upper right' ,title="Input")
        legConv1 = plotHist.legend(handles=handlesConv1, loc='center right' ,title="Conv1")
        legConv2 = plotHist.legend(handles=handlesConv2, loc='lower right' ,title="Conv2")

        plotHist.gca().add_artist(legInp)
        plotHist.gca().add_artist(legConv1)
        plotHist.gca().add_artist(legConv2)

        plt.grid()
        plt.savefig('../vis/{}/histo.pdf'.format(args.exp_id))
Example #5
def main(argv=None):

    #Building the arg reader that gets arguments from the config file and the command line
    argreader = ArgReader(argv)

    argreader.parser.add_argument(
        '--noise',
        type=float,
        default=0.1,
        metavar='NOISE',
        help=
        'the amount of noise to add to the gradient of the clustNet (as a percentage) (default: 0.1)'
    )

    argreader.parser.add_argument(
        '--optim',
        type=str,
        default="SGD",
        metavar='OPTIM',
        help='the optimizer algorithm to use (default: \'SGD\')')
    argreader.parser.add_argument(
        '--init',
        type=str,
        default=None,
        metavar='N',
        help='the weights to use to initialize the detectNets')

    #Reading the command line args
    argreader.getRemainingArgs()

    #Getting the args from command line and config file
    args = argreader.args

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    train_loader, test_loader, perm = dataLoader.loadData(
        args.dataset, args.batch_size, args.test_batch_size, args.permutate,
        args.cuda, args.num_workers, args.crop_size_imagenet, args.train_prop)

    if args.write_img_ex:

        for i in range(10):
            tensor = test_loader.dataset[i][0]
            vis.writeImg(
                '../vis/{}/{}_img{}.jpg'.format(args.exp_id, args.dataset, i),
                tensor.detach().numpy())

            origSize = tensor.size()
            tensor = tensor.view(-1)
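            #Indexing with np.argsort(perm) applies the inverse permutation,
            #recovering the original pixel order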
            tensor = tensor[np.argsort(perm)]
            tensor = tensor.view(origSize)

            vis.writeImg(
                '../vis/{}/{}_img{}_noperm.jpg'.format(args.exp_id,
                                                       args.dataset, i),
                tensor.detach().numpy())

    #The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))
    if not (os.path.exists("../results/{}".format(args.exp_id))):
        os.makedirs("../results/{}".format(args.exp_id))
    if not (os.path.exists("../nets/{}".format(args.exp_id))):
        os.makedirs("../nets/{}".format(args.exp_id))

    netType = "net"

    #Write the arguments in a config file so the experiment can be re-run
    argreader.writeConfigFile("../nets/{}/{}{}.ini".format(
        args.exp_id, netType, args.model_id))

    #The writer for tensorboardX
    writer = SummaryWriter("../results/{}".format(args.exp_id))

    print("Model :", args.model_id, "Experience :", args.exp_id)

    #Building the net
    net = netBuilder.netMaker(args)

    if args.cuda:
        net.cuda()

    startEpoch = initialize_Net_And_EpochNumber(net, args.start_mode,
                                                args.init_path, args.exp_id,
                                                args.model_id, args.cuda,
                                                netType)

    #Getting the constructor and the kwargs for the chosen optimizer
    optimConst, kwargs = get_OptimConstructor_And_Kwargs(
        args.optim, args.momentum)

    #If no learning rate schedule is indicated (i.e. there is only one learning rate),
    #the args.lr argument will be a float and not a list of floats.
    #Converting it to a one-element list makes the rest of the processing easier
    if type(args.lr) is float:
        args.lr = [args.lr]

    #Train and evaluate the clustering detecting network for several epochs
    lrCounter = 0

    for epoch in range(startEpoch, args.epochs + 1):

        #This condition determines when the learning rate should be updated (to follow the learning rate schedule)
        #The optimizer has to be rebuilt every time the learning rate is updated
        if (epoch - 1) % (
            (args.epochs + 1) // len(args.lr)) == 0 or epoch == startEpoch:

            kwargs['lr'] = args.lr[lrCounter]
            print("Learning rate : ", kwargs['lr'])
            optimizer = optimConst(net.parameters(), **kwargs)

            if lrCounter < len(args.lr) - 1:
                lrCounter += 1

        trainDetect(net, optimizer, train_loader, epoch, writer, args)
        with torch.no_grad():
            testDetect(net, test_loader, epoch, writer, args)
Example #6
def main(argv=None):

    #Getting arguments from config file and command line
    #Building the arg reader
    argreader = ArgReader(argv)

    argreader.parser.add_argument(
        '--max_act',
        type=str,
        nargs='*',
        metavar='VAL',
        help='To visualise an image that maximises the activation of one unit in the last layer. '
             'The values are: the path to the model, the number of images to be created, '
             'the layer to optimise (can be \'conv\' or \'dense\') and the unit to optimise. '
             'If the unit is not indicated, unit number i will be optimised for images with label i.'
    )

    argreader.parser.add_argument(
        '--stop_thres',
        type=float,
        default=0.000005,
        metavar='THRES',
        help=
        'If the distance travelled by the parameters during activation maximisation becomes smaller than this value, the optimisation stops.'
    )

    argreader.parser.add_argument(
        '--reg_weight',
        type=float,
        default=0,
        metavar='REGWEIGHT',
        help='The weight of the regularisation during activation maximisation.'
    )

    argreader.parser.add_argument(
        '--plot_feat_map',
        type=str,
        nargs='*',
        metavar='VAL',
        help='To visualise the last feature maps of a model. '
             'The values are the path to the model weights, the number of input images to pass '
             'through the net and the number of final feature maps to plot. '
             'The --exp_id, --model_id and --model arguments must be set.')

    #Reading the command line args
    argreader.getRemainingArgs()

    #Getting the args from command line and config file
    args = argreader.args
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    #The folders where the experiment files will be written
    if not (os.path.exists("../vis/{}".format(args.exp_id))):
        os.makedirs("../vis/{}".format(args.exp_id))

    if args.max_act:

        modelPath = args.max_act[0]
        nbImages = int(args.max_act[1])
        layToOpti = args.max_act[2]
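        #e.g. --max_act ../nets/myExp/net1_epoch10 5 conv 3 (hypothetical path and values)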

        random.seed(args.seed)

        #Building the net
        model = netBuilder.netMaker(args)
        model.load_state_dict(torch.load(modelPath))

        _, test_loader, _ = dataLoader.loadData(args.dataset, args.batch_size,
                                                1, False, args.cuda,
                                                args.num_workers)

        #Computing images that maximise the activation of the given unit in the given layer
        maxInd = len(test_loader.dataset) - 1

        model.eval()

        for i, (image, label) in enumerate(test_loader):

            print("Image ", i)

            img = Variable(test_loader.dataset[i][0]).unsqueeze(0)
            img.requires_grad = True

            writeImg('../vis/{}/img_'.format(args.exp_id) + str(i) + '.jpg',
                     image[0].detach().numpy())

            if len(args.max_act) == 4:
                unitInd = int(args.max_act[3])
            else:
                unitInd = label.item()

            opt(img,model,args.exp_id,args.model_id,i,unitInd=unitInd,lr=args.lr,momentum=args.momentum,optimType='LBFGS',layToOpti=layToOpti,\
                epoch=args.epochs,nbPrint=args.log_interval,stopThre=args.stop_thres,reg_weight=args.reg_weight)

            if i == nbImages - 1:
                break

    if args.plot_feat_map:

        modelPath = args.plot_feat_map[0]
        nbImages = int(args.plot_feat_map[1])
        nbFeatMaps = int(args.plot_feat_map[2])
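        #e.g. --plot_feat_map ../nets/myExp/net1_epoch10 4 10 (hypothetical path and values)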
        margin = 2

        #Building the net
        model = netBuilder.netMaker(args)
        model.load_state_dict(torch.load(modelPath))

        _, test_loader, _ = dataLoader.loadData(args.dataset, args.batch_size,
                                                1, False, args.cuda,
                                                args.num_workers)

        #Plotting the most important feature maps of the net for a few input images
        maxInd = len(test_loader.dataset) - 1

        model.eval()

        bigImg = None

        totalW = 0
        totalH = 0

        sortedMapInds = getMostImportantFeatMapsInd(model, args.exp_id,
                                                    args.model_id)

        imgLabelList = [test_loader.dataset[i] for i in range(nbImages)]
        imgList, _ = zip(*sorted(imgLabelList, key=lambda x: x[1]))

        for i in range(nbImages):

            img = imgList[i]
            inSize = img.shape[1], img.shape[2]
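            #bigImg is a grid with one row per input image: the input goes in the
            #first column and its most important feature maps in the following columns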
            if bigImg is None:
                bigImg = np.zeros((nbImages * (img.shape[1] + margin),
                                   (nbFeatMaps + 1) * (img.shape[2] + margin)))

            bigImg[i * (img.shape[1] + margin):(i + 1) * img.shape[1] +
                   i * margin, :img.shape[2]] = img.squeeze()

            _, featMaps = model(img.unsqueeze(0))

            #Keeping only the most important feature maps
            print(featMaps.shape)
            featMaps = featMaps[0, sortedMapInds]

            #Plotting at most nbFeatMaps of the selected maps
            for j in range(1, min(nbFeatMaps + 1, len(featMaps))):

                img = featMaps[j].detach().numpy()
                img = resize(img,
                             inSize,
                             mode="constant",
                             order=0,
                             anti_aliasing=True)

                bigImg[i * (img.shape[0] + margin):(i + 1) * img.shape[0] +
                       i * margin, j * (img.shape[1] + margin):(j + 1) *
                       (img.shape[1]) + j * margin] = img

                totalW += img.shape[0] + margin

        writeImg('../vis/{}/model_{}.png'.format(args.exp_id, args.model_id),
                 bigImg[np.newaxis],
                 size=(300 * nbImages, 300 * (nbFeatMaps + 1)))