Example #1
def train():
    # trainloader,testloader,classes = cifar10()
    net = saliency_model(num_classes=num_classes)
    net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters())
    # black_box_func = resnet(pretrained=True)
    black_box_func = torch.load(
        '/media/david/datos/Violence DATA/HockeyFights/checkpoints/resnet18-frames-Finetuned:False-3di-tempMaxPool-OnPlateau.tar'
    )
    black_box_func = black_box_func.cuda()
    loss_func = Loss(num_classes=num_classes)

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        running_loss = 0.0
        running_corrects = 0.0

        for i, data in tqdm(enumerate(dataloaders_dict['train'], 0)):
            # get the inputs
            inputs_r, labels = data  #dataset load [bs,ndi,c,w,h]
            # print('dataset element: ',inputs_r.shape)
            inputs_r = inputs_r.permute(1, 0, 2, 3, 4)
            inputs = torch.squeeze(inputs_r, 0)  #get one di [bs,c,w,h]
            # print('inputs shape:',inputs.shape)
            # wrap them in Variable
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            mask, out = net(inputs, labels)
            # print('mask shape:', mask.shape)
            # print('inputs shape:',inputs.shape)
            # print('labels shape:',labels.shape)

            # inputs_r = Variable(inputs_r.cuda())
            loss = loss_func.get(mask, inputs, labels, black_box_func)
            # running_loss += loss.data[0]
            running_loss += loss.item()

            if i % 10 == 0:
                print('Epoch = %d , Loss = %f ' % (epoch + 1, running_loss /
                                                   (batch_size * (i + 1))))

            loss.backward()
            optimizer.step()

        save_checkpoint(
            net,
            '/media/david/datos/Violence DATA/HockeyFights/checkpoints/saliency_model.tar'
        )
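In Example #1 the permute/squeeze pair converts a batch of dynamic images of shape [bs, ndi, c, w, h] into a plain image batch [bs, c, w, h] when only one dynamic image per video is used. A minimal, self-contained check of that reshaping (the concrete shapes are illustrative):

import torch

x = torch.randn(8, 1, 3, 224, 224)   # [bs, ndi, c, w, h]
x = x.permute(1, 0, 2, 3, 4)          # [ndi, bs, c, w, h]
x = torch.squeeze(x, 0)               # [bs, c, w, h] when ndi == 1
print(x.shape)                        # torch.Size([8, 3, 224, 224])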
Example #2
def train():
    num_epochs = 3
    trainloader, testloader, classes = cifar10()

    net = saliency_model()
    net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters())

    black_box_func = resnet()
    black_box_func = black_box_func.cuda()
    black_box_func = load_checkpoint(black_box_func,
                                     filename='./black_box_func.pth')
    # load the pretrained classification model
    loss_func = Loss(num_classes=10)

    for epoch in range(num_epochs):  # loop over the dataset multiple times

        running_loss = 0.0
        running_corrects = 0.0

        for i, data in tqdm(enumerate(trainloader, 0)):
            # get the inputs
            inputs, labels = data

            # wrap them in Variable
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            mask, out = net(inputs, labels)

            loss = loss_func.get(mask, inputs, labels, black_box_func)
            # .data[0] fails on 0-dim loss tensors in PyTorch >= 0.4; use .item()
            running_loss += loss.item()

            if i % 10 == 0:
                print('Epoch = %d , Loss = %f ' % (epoch + 1, running_loss /
                                                   (4 * (i + 1))))

            loss.backward()
            optimizer.step()

        save_checkpoint(net, 'saliency_model.pth')
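The examples above call save_checkpoint and load_checkpoint helpers that are not defined in these snippets. A minimal sketch of what they could look like, assuming whole-model checkpoints as in Example #1's torch.load call; the actual helpers in the original repository may differ:

import torch

def save_checkpoint(model, filename):
    # Assumed helper: persist the whole model object, since other examples
    # reload checkpoints directly with torch.load(...).
    torch.save(model, filename)

def load_checkpoint(model, filename):
    # Assumed helper: copy saved weights into an already constructed model.
    checkpoint = torch.load(filename, map_location='cpu')
    state_dict = checkpoint if isinstance(checkpoint, dict) else checkpoint.state_dict()
    model.load_state_dict(state_dict)
    return model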
Example #3
def train(mask_model, criterion, optimizer, regularizers, classifier_model,
          num_epochs, dataloader, numDynamicImages, checkpoint_path, fold):
    loss_func = Loss(num_classes=2,
                     regularizers=regularizers,
                     num_dynamic_images=numDynamicImages)
    best_loss = 1000.0
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        print("----- Epoch {}/{}".format(epoch + 1, num_epochs))
        running_loss = 0.0
        running_loss_train = 0.0
        for data in tqdm(dataloader):
            inputs, labels, video_name, _, _ = data  #dataset load [bs,ndi,c,w,h]
            # print('Inputs=', inputs.size())
            # wrap them in Variable
            inputs, labels = Variable(inputs.to(DEVICE)), Variable(
                labels.to(DEVICE))
            # zero the parameter gradients
            optimizer.zero_grad()
            mask, out = mask_model(inputs, labels)
            # print('MAsk passed=', mask.size())
            loss = loss_func.get(mask, inputs, labels, classifier_model)
            # running_loss += loss.data[0]
            running_loss += loss.item()
            running_loss_train += loss.item() * inputs.size(0)
            loss.backward()
            optimizer.step()

        epoch_loss = running_loss / len(dataloader.dataset)
        epoch_loss_train = running_loss_train / len(dataloader.dataset)
        print("{} RawLoss: {:.4f} Loss: {:.4f}".format('train', epoch_loss,
                                                       epoch_loss_train))

        if checkpoint_path is not None and epoch_loss < best_loss:
            best_loss = epoch_loss
            # name = '{}_epoch={}.pth'.format(checkpoint_path)
            print('Saving model...', checkpoint_path)
            torch.save(
                {
                    'fold': fold,
                    'epoch': epoch,
                    'loss': epoch_loss,
                    'model_state_dict': mask_model.state_dict(),
                }, checkpoint_path)
Example #4
def train(black_box_model,
          num_classes,
          num_epochs,
          regularizers,
          device,
          checkpoint_path,
          dataloaders_dict,
          black_box_file,
          numDynamicImages=0):
    # trainloader,testloader,classes = cifar10()
    net = saliencyModel.build_saliency_model(num_classes=num_classes)
    net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    # params_to_update = net.parameters()
    # optimizer = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
    optimizer = optim.Adam(net.parameters())
    # scheduler_type = "OnPlateau"
    # if scheduler_type == "StepLR":
    #     exp_lr_scheduler = lr_scheduler.StepLR( optimizer, step_size=7, gamma=0.1 )
    # elif scheduler_type == "OnPlateau":
    #     exp_lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, verbose=True)

    black_box_model = torch.load(black_box_file)
    black_box_model = black_box_model.cuda()
    loss_func = Loss(num_classes=num_classes, regularizers=regularizers)
    best_loss = 1000.0
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        print("----- Epoch {}/{}".format(epoch + 1, num_epochs))
        running_loss = 0.0
        running_loss_train = 0.0
        # running_corrects = 0.0

        for i, data in tqdm(enumerate(dataloaders_dict['train'], 0)):
            # get the inputs
            inputs, labels, video_name = data  #dataset load [bs,ndi,c,w,h]
            # print('dataset element: ',inputs_r.shape) #torch.Size([8, 1, 3, 224, 224])
            if numDynamicImages > 1:
                inputs = inputs.permute(1, 0, 2, 3, 4)
                inputs = torch.squeeze(inputs, 0)  #get one di [bs,c,w,h]
            # print('inputs shape:',inputs.shape)
            # wrap them in Variable
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())

            # zero the parameter gradients
            optimizer.zero_grad()

            mask, out = net(inputs, labels)
            # print('mask shape:', mask.shape)
            # print('inputs shape:',inputs.shape)
            # print('labels shape:', labels.shape)
            # print(labels)

            # inputs_r = Variable(inputs_r.cuda())
            loss = loss_func.get(mask, inputs, labels, black_box_model)
            # running_loss += loss.data[0]
            running_loss += loss.item()
            running_loss_train += loss.item() * inputs.size(0)
            # if(i%10 == 0):
            #     print('Epoch = %f , Loss = %f '%(epoch+1 , running_loss/(batch_size*(i+1))) )

            loss.backward()
            optimizer.step()
        # exp_lr_scheduler.step(running_loss)

        epoch_loss = running_loss / len(dataloaders_dict["train"].dataset)
        epoch_loss_train = running_loss_train / len(
            dataloaders_dict["train"].dataset)
        print("{} RawLoss: {:.4f} Loss: {:.4f}".format('train', epoch_loss,
                                                       epoch_loss_train))

        if epoch_loss < best_loss:
            best_loss = epoch_loss
            # self.best_model_wts = copy.deepcopy(self.model.state_dict())
            print('Saving entire saliency model...')
            save_checkpoint(net, checkpoint_path)


# def __anomaly_main__():
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--batchSize", type=int, default=8)
#     parser.add_argument("--numEpochs", type=int, default=10)
#     parser.add_argument("--numWorkers", type=int, default=4)
#     parser.add_argument("--areaL", type=float, default=None)
#     parser.add_argument("--smoothL", type=float, default=None)
#     parser.add_argument("--preserverL", type=float, default=None)
#     parser.add_argument("--areaPowerL", type=float, default=None)
#     parser.add_argument("--saliencyModelFolder",type=str, default=constants.ANOMALY_PATH_SALIENCY_MODELS)
#     parser.add_argument("--blackBoxFile", type=str)  #areaL-9.0_smoothL-0.3_epochs-20
#     parser.add_argument("--videoSegmentLength", type=int, default=0)
#     parser.add_argument("--positionSegment", type=str, default='random')
#     parser.add_argument("--maxNumFramesOnVideo", type=int, default=0)
#     parser.add_argument("--shuffle", type=lambda x: (str(x).lower() == 'true'), default=False)
#     # parser.add_argument("--areaL", type=float, default=8)
#     # parser.add_argument("--smoothL", type=float, default=0.5)
#     # parser.add_argument("--preserverL", type=float, default=0.3)
#     # parser.add_argument("--areaPowerL", type=float, default=0.3)
#     # parser.add_argument("--checkpointInfo",type=str)
#     args = parser.parse_args()
#     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#     avgmaxDuration = 0
#     numDiPerVideos = 1
#     input_size = 224
#     data_transforms = transforms_anomaly.createTransforms(input_size)
#     dataset_source = 'frames'
#     batch_size = args.batchSize
#     num_workers = args.numWorkers
#     num_epochs = args.numEpochs
#     black_box_file = args.blackBoxFile
#     saliency_model_folder = args.saliencyModelFolder
#     num_classes = 7
#     videoSegmentLength = args.videoSegmentLength
#     positionSegment = args.positionSegment
#     maxNumFramesOnVideo = args.maxNumFramesOnVideo
#     shuffle = args.shuffle
#     # regularizers = {'area_loss_coef': args.areaL, 'smoothness_loss_coef': args.smoothL, 'preserver_loss_coef': args.preserverL, 'area_loss_power': args.areaPowerL}
#     # checkpoint_info = args.checkpointInfo
#     checkpoint_info = ''
#     areaL, smoothL, preserverL, areaPowerL = None,None,None,None

#     if args.areaL == None:
#         areaL = 8
#     else:
#         areaL = args.areaL
#         checkpoint_info += '_areaL-'+str(args.areaL)

#     if args.smoothL == None:
#         smoothL = 0.5
#     else:
#         smoothL = args.smoothL
#         checkpoint_info += '_smoothL-' + str(args.smoothL)

#     if args.preserverL == None:
#         preserverL = 0.3
#     else:
#         preserverL = args.preserverL
#         checkpoint_info += '_preserverL-' + str(args.preserverL)

#     if args.areaPowerL == None:
#         areaPowerL = 0.3
#     else:
#         areaPowerL = args.areaPowerL
#         checkpoint_info += '_areaPowerL-' + str(args.areaPowerL)

#     print('areaL, smoothL, preserverL, _areaPowerL=',areaL, smoothL, preserverL, areaPowerL)

#     regularizers = {'area_loss_coef': areaL, 'smoothness_loss_coef': smoothL, 'preserver_loss_coef': preserverL, 'area_loss_power': areaPowerL}

#     checkpoint_path = os.path.join(saliency_model_folder, 'saliency_model' + checkpoint_info + '_epochs-' + str(num_epochs) + '.tar')
#     image_datasets, dataloaders_dict = init_anomaly(batch_size, num_workers, maxNumFramesOnVideo, data_transforms, numDiPerVideos, avgmaxDuration, dataset_source, shuffle, videoSegmentLength, positionSegment)

#     # image_datasets, dataloaders_dict = init(batch_size, num_workers, interval_duration, data_transforms, dataset_source, debugg_mode, numDiPerVideos, avgmaxDuration)
#     train(num_classes, num_epochs, regularizers, device, checkpoint_path, dataloaders_dict, black_box_file, numDiPerVideos)

# __anomaly_main__()
Example #5
class BrainsphereModel:
    def __init__(self, functional_connectivity, patient_data, **kwargs):
        self.types = [
            'ConcentrationLinear', 'Constant', 'ConcentrationSigmoid',
            'WeightedDegreeLinear', 'WeightedDegreeSigmoid'
        ]
        self.producer = Producer(self.types)
        self.params = self.producer.params

        for key, value in kwargs.items():
            if key == "nodeCoordinates":
                self.nodeCoordinates = value
            elif key == "optimizer":
                self.optimizer = value
            elif key == "loss":
                self.loss = value
            elif key == "euclideanAdjacency":
                self.euclideanAdjacency = value
            elif key == "producer":
                self.producer = value
            elif key == "diffuser":
                self.diffuser = value
            elif key == "params":
                self.params.update(value)
            else:
                raise TypeError("Illegal Keyword '" + str(key) + "'")

        self.functionalConnectivity = functional_connectivity
        self.patientData = patient_data
        self.numNodes, _ = np.shape(functional_connectivity)
        self.loss = Loss("mse", self.patientData)
        self.lastloss = 0

        self.reset()

    def reset(self):
        self.initializer = Initializer("braak1", self.numNodes, self.params)
        self.concentration = self.initializer.get()
        self.concentrationHistory = np.copy(self.concentration)

        self.producer = Producer(self.types)
        self.diffusor = Diffusor("euclidean",
                                 self.params,
                                 EuclideanAdjacency=self.euclideanAdjacency)

    def run(self):
        stop_concentration = 1100
        timesteps = 2500

        self.reset()
        deltaT = 0.0001
        self.concentration += deltaT * (
            self.producer.produce(params=self.params,
                                  concentration=self.concentration,
                                  connectivity=self.functionalConnectivity) +
            self.diffusor.diffuse(self.concentration))
        self.concentrationHistory = np.append(self.concentrationHistory,
                                              self.concentration,
                                              axis=0)
        deltaConc = np.sum(self.concentrationHistory[1, :]) - np.sum(
            self.concentrationHistory[0, :])
        if deltaConc <= 0.0:
            return 9999999
        else:
            deltaT *= stop_concentration / timesteps / deltaConc

        while (np.sum(self.concentration) <
               stop_concentration) and (np.sum(deltaConc) > 0):
            deltaConc = deltaT * (self.producer.produce(
                params=self.params,
                concentration=self.concentration,
                connectivity=self.functionalConnectivity) +
                                  self.diffusor.diffuse(self.concentration))
            self.concentration += deltaConc
            self.concentrationHistory = np.append(self.concentrationHistory,
                                                  self.concentration,
                                                  axis=0)
            # print(self.loss.get(self.concentrationHistory))

        self.lastloss = self.loss.get(self.concentrationHistory)

        return self.lastloss

    def gradient(self, loss=None):
        if loss is None:
            loss = self.run()

        params_new = {}
        params_old = self.params.copy()
        deltaX = {}
        for key, value in params_old.items():
            deltaX[key] = np.sign(np.random.randn()) * 0.01
            params_new[key] = value + deltaX[key]

        self.params = params_new
        new_loss = self.run()
        grad = {}
        self.params = params_old

        for key, value in params_old.items():
            grad[key] = (new_loss - loss) / (deltaX[key])

        return grad

    def gradient4(self):
        loss = self.run()
        gradients = Parallel(n_jobs=4)(delayed(self.gradient)(loss)
                                       for i in range(4))

        grad = {}

        for key in self.params:
            gradsum = 0
            count = 0
            for g in gradients:
                gradsum += g.get(key)
                count += 1.0
            grad[key] = gradsum / count
        return grad
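BrainsphereModel.gradient estimates a one-sided finite-difference gradient by perturbing every parameter with a random ±0.01 step, and gradient4 averages four such estimates computed in parallel with joblib. A hypothetical sketch of how the returned dictionary might drive a plain gradient-descent update; the constructor arguments, learning rate and step count are placeholders, not part of the original class:

# functional_connectivity, patient_data and euclidean_adjacency are assumed
# to be NumPy arrays prepared elsewhere.
model = BrainsphereModel(functional_connectivity, patient_data,
                         euclideanAdjacency=euclidean_adjacency)
learning_rate = 0.001  # illustrative value
for step in range(50):
    grad = model.gradient4()                   # averaged finite-difference estimate
    for key in model.params:
        model.params[key] -= learning_rate * grad[key]
    print(step, model.lastloss)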
Example #6
def train(batch_size, num_workers, regularizers, device, checkpoint_file):
    num_epochs = 3
    trainloader, testloader, classes = cifar10(batch_size, num_workers)

    net = saliency_model()
    net = net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters())

    # black_box_func = resnet(pretrained=True)
    # black_box_func = black_box_func.cuda()
    model_name = 'alexnet'
    # defaults.device = 'cpu'
    black_box_func = BlackBoxModel(model_name=model_name,
                                   pretrained=True,
                                   num_classes=10)
    black_box_func.load('data/checkpoints/black_box_model_alexnet.tar',
                        device='gpu')
    black_box_func.toDevice(device)
    black_box_func = black_box_func.getModel()

    loss_func = Loss(num_classes=10, regularizers=regularizers)

    for epoch in range(num_epochs):  # loop over the dataset multiple times
        print("----- Epoch {}/{}".format(epoch, num_epochs))
        running_loss = 0.0
        running_corrects = 0.0
        running_loss_train = 0.0

        for i, data in tqdm(enumerate(trainloader, 0)):
            # get the inputs
            inputs, labels = data

            # wrap them in Variable
            inputs, labels = Variable(inputs.to(device)), Variable(
                labels.to(device))

            # zero the parameter gradients
            optimizer.zero_grad()

            mask, out = net(inputs, labels)
            # print('-----mask shape:',mask.shape)
            # print('-----inputs shape:',inputs.shape)
            # print('-----labels shape:', labels.shape)
            # print(labels)

            loss = loss_func.get(mask, inputs, labels, black_box_func)
            # running_loss += loss.data[0]
            running_loss += loss.item()
            running_loss_train += loss.item() * inputs.size(0)

            # if(i%10 == 0):
            #     print('Epoch = %f , Loss = %f '%(epoch+1 , running_loss/(batch_size*(i+1))) )

            loss.backward()
            optimizer.step()
        epoch_loss = running_loss / len(trainloader.dataset)
        epoch_loss_train = running_loss_train / len(trainloader.dataset)
        print("{} RawLoss: {:.4f} Loss: {:.4f}".format("train", epoch_loss,
                                                       epoch_loss_train))
        save_checkpoint(net, checkpoint_file)
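The regularizers argument is handed straight to Loss; the commented-out __anomaly_main__ driver earlier in this collection builds it as a dictionary of four loss coefficients. A hypothetical driver for the Example #6 train function under that assumption (batch size, worker count and checkpoint path are placeholders):

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
regularizers = {
    'area_loss_coef': 8,
    'smoothness_loss_coef': 0.5,
    'preserver_loss_coef': 0.3,
    'area_loss_power': 0.3,
}
train(batch_size=4, num_workers=2, regularizers=regularizers,
      device=device, checkpoint_file='data/checkpoints/saliency_model_cifar10.pth')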