Example #1
    def spectrum_dct(self):

        """
        Calculo del espectro segun transformada discreta del coseno
        """

        return Transforms.dct(self)
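The Transforms module above is project code whose internals are not shown here. As a minimal, non-authoritative sketch, assuming the wrapper delegates to a standard orthonormal type-II DCT, a scipy-based equivalent (function name and signature are illustrative only) could look like:

import numpy as np
from scipy.fft import dct

def spectrum_dct_sketch(samples):
    # magnitude of the orthonormal type-II DCT of a 1-D signal
    return np.abs(dct(np.asarray(samples, dtype=float), type=2, norm='ortho'))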
Example #2
    def temp_add_transforms(self):
        # TODO: accept multiple arguments for features to add
        # this function is currently only for debugging
        # create features/transforms

        self.df['mmi'] = Transforms.mmi(self.df, 500)
        self.df['sma'] = self.df.price.rolling(window=5, center=False).mean()
Example #3
    def validationTransform(self, rgb, depth):
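        # resize so the image height becomes 250 px, preserving the aspect ratio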
        first_resize = tuple(
            map(
                int,
                list((250.0 / IMAGE_HEIGHT) *
                     np.array([IMAGE_HEIGHT, IMAGE_WIDTH]))))
        depth_np = depth
        transform = T.Compose([
            T.Resize(first_resize),
            T.CenterCrop((228, 304)),
            T.Resize(self.output_size),
        ])
        rgb_np = transform(rgb)
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255
        depth_np = transform(depth_np)

        return rgb_np, depth_np
Example #4
    def trainTransform(self, rgb, depth):
        s = np.random.uniform(1.0, 1.5)  # random scaling
        depth_np = depth / s
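        # dividing depth by s compensates for the later resize of the image by
        # the same factor s, keeping image and depth geometrically consistent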
        angle = np.random.uniform(-5.0, 5.0)  # random rotation degrees
        do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip

        # perform 1st step of data augmentation
        first_resize = tuple(
            map(
                int,
                list((250.0 / IMAGE_HEIGHT) *
                     np.array([IMAGE_HEIGHT, IMAGE_WIDTH]))))
        second_resize = tuple(
            map(int, list(s * np.array([IMAGE_HEIGHT, IMAGE_WIDTH]))))
        transform = T.Compose([
            T.Resize(
                first_resize
            ),  # this is for computational efficiency, since rotation can be slow
            T.Rotate(angle),
            T.Resize(second_resize),
            T.CenterCrop((228, 304)),
            T.HorizontalFlip(do_flip),
            T.Resize(self.output_size),
        ])
        rgb_np = transform(rgb)
        rgb_np = self.color_jitter(rgb_np)  # random color jittering
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255
        depth_np = transform(depth_np)

        return rgb_np, depth_np
Example #5
    def new_event(self, date, event_index=None):
        '''returns a new event'''

        # if no memory is loaded
        if "_self" not in self.current_streamview.atoms:
            return None

        # if event is specified, play it now
        if event_index is not None:
            self.reset()
            event_index = int(event_index)
            z_, event = self.current_streamview.atoms["_self"].memorySpace[
                event_index]
            # using actual transformation?
            transforms = [Transforms.NoTransform()]
            self.waiting_to_jump = False
        else:
            # get global activity
            global_activity = self.get_merged_activity(
                date, merge_actions=self.merge_actions)

            # if about to jump, erase the peak in the neighbouring event
            if self.waiting_to_jump:
                zetas = global_activity.get_dates_list()
                states, _ = self.current_streamview.atoms[
                    "_self"].memorySpace.get_events(zetas)
                for i in range(0, len(states)):
                    if states[i].index == self.improvisation_memory[-1][
                            0].index + 1:
                        del global_activity[i]
                self.waiting_to_jump = False

            if len(global_activity) != 0 and len(
                    self.improvisation_memory) > 0:
                event, transforms = self.decide(global_activity)
                if event is None:
                    # if no event returned, choose default
                    event, transforms = self.decide_default()
                if not isinstance(transforms, list):
                    transforms = [transforms]
            else:
                # if activity is empty, choose default
                event, transforms = self.decide_default()
            for transform in transforms:
                event = transform.decode(event)
        # add event to improvisation memory
        self.improvisation_memory.append((event, transforms))
        # influence the private streamview if auto-influence is activated
        if self.self_influence:
            self.current_streamview.influence("_self", date, event.get_label())
        # send the state information
        self.send([
            event.index,
            event.get_contents().get_zeta(),
            event.get_contents().get_state_length()
        ], "/state")
        return event
Example #6
    def decide_default(self):
        '''default decision method: select the conjoint (next) event'''
        if len(self.improvisation_memory) != 0:
            previousState = self.improvisation_memory[-1][0]
            new = self.current_streamview.atoms["_self"].memorySpace[
                (previousState.index + 1) %
                len(self.current_streamview.atoms["_self"].memorySpace)]
            trans = self.improvisation_memory[-1][1]
        else:
            new = self.current_streamview.atoms["_self"].memorySpace[0]
            trans = [Transforms.NoTransform()]
        return new[1], trans
Example #7
    def __init__(self, root_dir, shape_data, img_transform=None, label_transform=None, nameImageFile="images", nameMasksFile="masks"):

        self.shape_data = shape_data
        self.root_dir = root_dir
        self.img_transform = img_transform
        self.label_transform = label_transform
        self.nameImageFile = nameImageFile
        self.nameMasksFile = nameMasksFile
        self.files = os.listdir(self.root_dir)

        # augmentation transforms
        self.TrChannels = trs.TransformColorChannels()
        self.TrFlip = trs.TransformFlip()
        self.colorJitter = trs.TransformsColorJitter(0.65, 0.8, 0.8, 0.09)
        self.TrNoisy = trs.TransformNoisyNormal((128, 128, 128), (128, 128, 128), 0.98, 0.02)
        self.TrBlur = trs.TransformBlur(3)


        print("Found files: ",len(self.files))
Example #8
    def __init__(self, rootData, shapeData, imgTransforms=None, labelTransforms=None):
        self.rootData = rootData
        self.shapeData = shapeData
        self.imgTransforms = imgTransforms
        self.labelTransforms = labelTransforms
        self.filesDataset = list()

        # augmentation transforms
        self.TrChannels = trs.TransformColorChannels()
        self.TrFlip = trs.TransformFlip()
        self.colorJitter = trs.TransformsColorJitter(0.65, 0.8, 0.8, 0.09)
        self.TrNoisy = trs.TransformNoisyNormal((128, 128, 128), (128, 128, 128), 0.9, 0.1)
        self.TrBlur = trs.TransformBlur(5)

        rootFiles1 = os.listdir(rootData)
        for fileName in rootFiles1:
            cropped_train = os.path.join(rootData, fileName, "cropped_train")
            instrument_datasets = os.listdir(cropped_train)
            for instrument_dataset in instrument_datasets:
                imagesPath = os.path.join(cropped_train, instrument_dataset, "images")
                masksPath = os.path.join(cropped_train, instrument_dataset, "binary_masks")

                images = os.listdir(imagesPath)
                masks = os.listdir(masksPath)

                lenImgs = len(images)
                lenMasks = len(masks)
                if lenImgs != lenMasks:
                    strError = "mismatch between the number of images (" + str(lenImgs) + ") and masks (" + str(lenMasks) + ")"
                    raise IOError(strError)
                for i in range(lenImgs):
                    img_file = os.path.join(imagesPath, images[i])
                    msk_file = os.path.join(masksPath, masks[i])
                    self.filesDataset.append({
                        "image": img_file,
                        "label": msk_file
                    })
Example #9
    def open_transform_operations(self):
        transform_window = tk.Tk(screenName=None,
                                 baseName=None,
                                 className='Tk',
                                 useTk=1)
        transform_window.title('Transform Operations')
        transform_window.geometry("400x400")

        swirl_button = tk.Button(transform_window,
                                 text='Swirl Operation with custom image',
                                 width=self.width,
                                 command=lambda: transforms.swirled(self.img))
        swirl_button.pack()

        swirl_with_checker_board_button = tk.Button(
            transform_window,
            text='Swirl Operation with checker board',
            width=self.width,
            command=lambda: transforms.swirled_with_checkerboard())
        swirl_with_checker_board_button.pack()

        rescale_button = tk.Button(
            transform_window,
            text="Rescale with anti anti aliasing",
            width=self.width,
            command=lambda: transforms.rescale(self.img))
        rescale_button.pack()

        resize_button = tk.Button(transform_window,
                                  text="Resize Operation with anti aliasing",
                                  width=self.width,
                                  command=lambda: transforms.resize(self.img))
        resize_button.pack()

        downscale_button = tk.Button(
            transform_window,
            text='Downscale Operation',
            width=self.width,
            command=lambda: transforms.downscale(self.img_gray))
        downscale_button.pack()

        rotation_button = tk.Button(
            transform_window,
            text="Rotation Operation",
            width=self.width,
            command=lambda: transforms.rotation(self.img))
        rotation_button.pack()
Example #10
    def __getitem__(self, index):
        rgb, depth = self.__getraw__(index)
        if self.transform is not None:
            rgb_np, depth_np = self.transform(rgb, depth)
        else:
            raise (RuntimeError("transform not defined"))

        # color normalization
        # rgb_tensor = normalize_rgb(rgb_tensor)
        # rgb_np = normalize_np(rgb_np)

        if self.modality == 'rgb':
            input_np = rgb_np

        to_tensor = T.ToTensor()
        input_tensor = to_tensor(input_np)
        while input_tensor.dim() < 3:
            input_tensor = input_tensor.unsqueeze(0)
        depth_tensor = to_tensor(depth_np)
        depth_tensor = depth_tensor.unsqueeze(0)

        return input_tensor, depth_tensor
Example #11
class CustomDataLoader(data.Dataset):
    modality_names = ['rgb']

    def isImageFile(self, filename):
        IMG_EXTENSIONS = ['.h5']
        return any(
            filename.endswith(extension) for extension in IMG_EXTENSIONS)

    def findClasses(self, dir):
        classes = [
            d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))
        ]
        classes.sort()
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        return classes, class_to_idx

    def makeDataset(self, dir, class_to_idx):
        images = []
        print(dir)
        dir = os.path.expanduser(dir)
        for target in sorted(os.listdir(dir)):
            d = os.path.join(dir, target)
            if not os.path.isdir(d):
                continue
            for root, _, fnames in sorted(os.walk(d)):
                for fname in sorted(fnames):
                    if self.isImageFile(fname):
                        path = os.path.join(root, fname)
                        item = (path, class_to_idx[target])
                        images.append(item)
        return images

    color_jitter = T.ColorJitter(0.4, 0.4, 0.4)

    def __init__(self, root, split, modality='rgb', loader=h5Loader):
        classes, class_to_idx = self.findClasses(root)
        imgs = self.makeDataset(root, class_to_idx)
        assert len(imgs) > 0, "Found 0 images in subfolders of: " + root + "\n"
        # print("Found {} images in {} folder.".format(len(imgs), split))
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        if split == 'train':
            self.transform = self.trainTransform
        elif split in ('holdout', 'val'):
            self.transform = self.validationTransform
        else:
            raise (RuntimeError("Invalid dataset split: " + split + "\n"
                                "Supported dataset splits are: train, val"))
        self.loader = loader

        assert (modality in self.modality_names), "Invalid modality: " + modality + "\n" + \
                                                  "Supported modalities are: " + ', '.join(self.modality_names)
        self.modality = modality

    # def trainTransform(self, rgb, depth):
    #     raise (RuntimeError("train_transform() is not implemented. "))
    #
    # def validationTransform(rgb, depth):
    #     raise (RuntimeError("val_transform() is not implemented."))

    def __getraw__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (rgb, depth) the raw data.
        """
        path, target = self.imgs[index]
        rgb, depth = self.loader(path)
        return rgb, depth

    def __getitem__(self, index):
        rgb, depth = self.__getraw__(index)
        if self.transform is not None:
            rgb_np, depth_np = self.transform(rgb, depth)
        else:
            raise (RuntimeError("transform not defined"))

        # color normalization
        # rgb_tensor = normalize_rgb(rgb_tensor)
        # rgb_np = normalize_np(rgb_np)

        if self.modality == 'rgb':
            input_np = rgb_np

        to_tensor = T.ToTensor()
        input_tensor = to_tensor(input_np)
        while input_tensor.dim() < 3:
            input_tensor = input_tensor.unsqueeze(0)
        depth_tensor = to_tensor(depth_np)
        depth_tensor = depth_tensor.unsqueeze(0)

        return input_tensor, depth_tensor

    def __len__(self):
        return len(self.imgs)
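The default loader, h5Loader, is referenced in __init__ but not defined in this snippet. A minimal sketch of what such a loader might do, assuming each .h5 file stores one 'rgb' array and one 'depth' array (the dataset key names are assumptions, not confirmed by the source):

import h5py
import numpy as np

def h5Loader(path):
    # read one (rgb, depth) sample; the 'rgb' and 'depth' keys are assumed
    with h5py.File(path, 'r') as h5f:
        rgb = np.array(h5f['rgb'])
        if rgb.ndim == 3 and rgb.shape[0] == 3:  # CxHxW -> HxWxC if needed
            rgb = np.transpose(rgb, (1, 2, 0))
        depth = np.array(h5f['depth'])
    return rgb, depth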
Example #12
def trainValidateSegmentation(args):

    print('Data file: ' + str(args.cached_data_file))
    print(args)

    # check if processed data file exists or not
    if not os.path.isfile(args.cached_data_file):
        dataLoader = ld.LoadData(args.data_dir, args.data_dir_val,
                                 args.classes, args.cached_data_file)
        data = dataLoader.processData()
        if data is None:
            print('Error while pickling data. Please check.')
            exit(-1)
    else:
        data = pickle.load(open(args.cached_data_file, "rb"))
    print('=> Loading the model')
    model = net.ESPNet(classes=args.classes, channels=args.channels)
    args.savedir = args.savedir + os.sep

    if args.onGPU:
        model = model.cuda()

    # create the directory if it does not exist
    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    if args.visualizeNet:
        import VisualizeGraph as viz
        x = Variable(
            torch.randn(1, args.channels, args.inDepth, args.inWidth,
                        args.inHeight))

        if args.onGPU:
            x = x.cuda()

        y = model(x, (128, 128, 128))  #, _, _
        g = viz.make_dot(y)
        g.render(args.savedir + os.sep + 'model', view=False)

    total_parameters = 0
    for parameter in model.parameters():
        i = len(parameter.size())
        p = 1
        for j in range(i):
            p *= parameter.size(j)
        total_parameters += p

    print('Parameters: ' + str(total_parameters))
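    # note: an equivalent idiomatic one-liner would be
    # total_parameters = sum(p.numel() for p in model.parameters())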

    # define optimization criteria
    weight = torch.from_numpy(
        data['classWeights'])  # convert the numpy array to torch
    print('Class Imbalance Weights')
    print(weight)
    criteria = torch.nn.CrossEntropyLoss(weight)
    if args.onGPU:
        criteria = criteria.cuda()

    # We train at three different resolutions (144x144x144, 96x96x96 and 128x128x128)
    # and validate at one resolution (128x128x128)
    trainDatasetA = myTransforms.Compose([
        myTransforms.MinMaxNormalize(),
        myTransforms.ScaleToFixed(dimA=144, dimB=144, dimC=144),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor(args.scaleIn),
    ])

    trainDatasetB = myTransforms.Compose([
        myTransforms.MinMaxNormalize(),
        myTransforms.ScaleToFixed(dimA=96, dimB=96, dimC=96),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor(args.scaleIn),
    ])

    trainDatasetC = myTransforms.Compose([
        myTransforms.MinMaxNormalize(),
        myTransforms.ScaleToFixed(dimA=args.inWidth,
                                  dimB=args.inHeight,
                                  dimC=args.inDepth),
        myTransforms.RandomFlip(),
        myTransforms.ToTensor(args.scaleIn),
    ])

    valDataset = myTransforms.Compose([
        myTransforms.MinMaxNormalize(),
        myTransforms.ScaleToFixed(dimA=args.inWidth,
                                  dimB=args.inHeight,
                                  dimC=args.inDepth),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainLoaderA = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDatasetA),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=False)  # disabling pin memory because swap usage is high
    trainLoaderB = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['trainIm'], data['trainAnnot'], transform=trainDatasetB),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.num_workers,
                                               pin_memory=False)
    trainLoaderC = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['trainIm'], data['trainAnnot'], transform=trainDatasetC),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.num_workers,
                                               pin_memory=False)

    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['valIm'], data['valAnnot'], transform=valDataset),
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=args.num_workers,
                                            pin_memory=False)

    # define the optimizer
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 args.lr, (0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=2e-4)

    if args.onGPU:
        cudnn.benchmark = True

    start_epoch = 0
    stored_loss = 100000000.0
    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            stored_loss = checkpoint['stored_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    logFileLoc = args.savedir + args.logFile
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
        logger.write("Parameters: %s" % (str(total_parameters)))
        logger.write(
            "\n%s\t%s\t%s\t%s\t%s\t" %
            ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val)'))
        logger.flush()
    else:
        logger = open(logFileLoc, 'w')
        logger.write("Arguments: %s" % (str(args)))
        logger.write("\n Parameters: %s" % (str(total_parameters)))
        logger.write(
            "\n%s\t%s\t%s\t%s\t%s\t" %
            ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val)'))
        logger.flush()

    # halve the learning rate every args.step_loss epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.step_loss,
                                                gamma=0.5)
    best_val_acc = 0

    loader_idxs = [
        0, 1, 2
    ]  # the three loaders at different resolutions map to these indices
    for epoch in range(start_epoch, args.max_epochs):
        # step the learning rate
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        print('Running epoch {} with learning rate {:.5f}'.format(epoch, lr))

        if epoch > 0:
            # shuffle the loaders
            np.random.shuffle(loader_idxs)

        for l_id in loader_idxs:
            if l_id == 0:
                train(args, trainLoaderA, model, criteria, optimizer, epoch)
            elif l_id == 1:
                train(args, trainLoaderB, model, criteria, optimizer, epoch)
            else:
                lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr = \
                    train(args, trainLoaderC, model, criteria, optimizer, epoch)

        # evaluate on validation set
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = val(
            args, valLoader, model, criteria)

        print('saving checkpoint')
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': str(model),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lossTr': lossTr,
                'lossVal': lossVal,
                'iouTr': mIOU_tr,
                'iouVal': mIOU_val,
                'stored_loss': stored_loss,
            }, args.savedir + '/checkpoint.pth.tar')

        # save the model also
        if mIOU_val >= best_val_acc:
            best_val_acc = mIOU_val
            torch.save(model.state_dict(), args.savedir + '/best_model.pth')

        with open(args.savedir + 'acc_' + str(epoch) + '.txt', 'w') as log:
            log.write(
                "\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f"
                % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val))
            log.write('\n')
            log.write('Per Class Training Acc: ' + str(per_class_acc_tr))
            log.write('\n')
            log.write('Per Class Validation Acc: ' + str(per_class_acc_val))
            log.write('\n')
            log.write('Per Class Training mIOU: ' + str(per_class_iu_tr))
            log.write('\n')
            log.write('Per Class Validation mIOU: ' + str(per_class_iu_val))

        logger.write("\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.6f" %
                     (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr))
        logger.flush()
        print("Epoch : " + str(epoch) + ' Details')
        print(
            "\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f"
            % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val))

    logger.close()
Example #13
    def idct(self):
        """
        Compute the Inverse Discrete Cosine Transform (IDCT)
        """
        return Transforms.idct(self)
Example #14
    def dct(self):
        """
        Compute the Discrete Cosine Transform (DCT)
        """
        return Transforms.dct(self)
Example #15
    def show_price(self):
        # plot prices of self.df
        plt.plot(self.df.price)
        plt.show()

    def temp_add_transforms(self):
        # TODO: accept multiple arguments for features to add
        # this function is currently only for debugging
        # create features/transforms

        self.df['mmi'] = Transforms.mmi(self.df, 500)
        self.df['sma'] = self.df.price.rolling(window=5, center=False).mean()


if __name__ == "__main__":
    month_list = ['df201609']

    dh = DataHandler()
    for month in month_list:
        dh.concat_month(month)
    dh.temp_add_transforms()
    dh.df["rsi"] = Transforms.rsi(dh.df.price, 120)
    dh.df["expiry"] = Transforms.expiry(dh.df, 300)
    ax1 = plt.subplot(211)
    ax1.plot(dh.df.index, dh.df.price)
    ax2 = plt.subplot(212, sharex=ax1)
    ax2.plot(dh.df.index, dh.df.rsi)
    plt.show()
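Transforms.rsi and Transforms.mmi are project helpers whose implementations are not shown. For reference, a minimal pandas sketch of a textbook RSI, assuming the project version follows the standard rolling average-gain over average-loss definition:

import pandas as pd

def rsi_sketch(price, window=120):
    # Relative Strength Index over a rolling window
    delta = price.diff()
    avg_gain = delta.clip(lower=0).rolling(window).mean()
    avg_loss = (-delta.clip(upper=0)).rolling(window).mean()
    return 100 - 100 / (1 + avg_gain / avg_loss)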
Example #16
def trainValidateSegmentation(args):
    '''
    Main function for training and validation
    :param args: global arguments
    :return: None
    '''
    # check if processed data file exists or not
    if not os.path.isfile(args.cached_data_file):
        dataLoad = ld.LoadData(args.data_dir, args.classes, args.cached_data_file)
        data = dataLoad.processData()
        if data is None:
            print('Error while pickling data. Please check.')
            exit(-1)
    else:
        data = pickle.load(open(args.cached_data_file, "rb"))

    q = args.q
    p = args.p
    # load the model
    if not args.decoder:
        model = net.ESPNet_Encoder(args.classes, p=p, q=q)
        args.savedir = args.savedir + '_enc_' + str(p) + '_' + str(q) + '/'
    else:
        model = net.ESPNet(args.classes, p=p, q=q, encoderFile=args.pretrained)
        args.savedir = args.savedir + '_dec_' + str(p) + '_' + str(q) + '/'

    if args.onGPU:
        model = model.cuda()

    # create the directory if not exist
    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    if args.visualizeNet:
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))

        if args.onGPU:
            x = x.cuda()

        y = model.forward(x)
        g = viz.make_dot(y)
        g.render(args.savedir + 'model.png', view=False)

    total_parameters = netParams(model)
    print('Total network parameters: ' + str(total_parameters))

    # define optimization criteria
    weight = torch.from_numpy(data['classWeights']) # convert the numpy array to torch
    if args.onGPU:
        weight = weight.cuda()

    criteria = CrossEntropyLoss2d(weight)

    if args.onGPU:
        criteria = criteria.cuda()

    print('Data statistics')
    print(data['mean'], data['std'])
    print(data['classWeights'])

    #compose the data with transforms
    trainDataset_main = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1024, 512),
        myTransforms.RandomCropResize(32),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64).
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale1 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1536, 768), # 1536, 768
        myTransforms.RandomCropResize(100),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale2 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1280, 720), # 1536, 768
        myTransforms.RandomCropResize(100),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale3 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(768, 384),
        myTransforms.RandomCropResize(32),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    trainDataset_scale4 = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(512, 256),
        #myTransforms.RandomCropResize(20),
        myTransforms.RandomFlip(),
        #myTransforms.RandomCrop(64).
        myTransforms.ToTensor(args.scaleIn),
        #
    ])


    valDataset = myTransforms.Compose([
        myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Scale(1024, 512),
        myTransforms.ToTensor(args.scaleIn),
        #
    ])

    # since we are training from scratch, we create data loaders at different
    # scales so that we can generate more augmented data and prevent overfitting

    trainLoader = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_main),
        batch_size=args.batch_size + 2, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainLoader_scale1 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale1),
        batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainLoader_scale2 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale2),
        batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainLoader_scale3 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale3),
        batch_size=args.batch_size + 4, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainLoader_scale4 = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale4),
        batch_size=args.batch_size + 4, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    valLoader = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset),
        batch_size=args.batch_size + 4, shuffle=False, num_workers=args.num_workers, pin_memory=True)

    if args.onGPU:
        cudnn.benchmark = True

    start_epoch = 0

    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            # args.lr = checkpoint['lr']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                .format(args.resumeLoc, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resumeLoc))

    logFileLoc = args.savedir + args.logFile
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write("Parameters: %s" % (str(total_paramters)))
        logger.write("\n%s\t%s\t%s\t%s\t%s\t" % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val'))
    logger.flush()

    optimizer = torch.optim.Adam(model.parameters(), args.lr, (0.9, 0.999), eps=1e-08, weight_decay=5e-4)
    # halve the learning rate after every step_loss epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_loss, gamma=0.5)


    for epoch in range(start_epoch, args.max_epochs):

        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        print("Learning rate: " +  str(lr))

        # train for one epoch
        # We consider 1 epoch with all the training data (at different scales)
        train(args, trainLoader_scale1, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale2, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale4, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale3, model, criteria, optimizer, epoch)
        lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr = train(args, trainLoader, model, criteria, optimizer, epoch)

        # evaluate on validation set
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = val(args, valLoader, model, criteria)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': str(model),
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lossTr': lossTr,
            'lossVal': lossVal,
            'iouTr': mIOU_tr,
            'iouVal': mIOU_val,
            'lr': lr
        }, args.savedir + 'checkpoint.pth.tar')

        #save the model also
        model_file_name = args.savedir + '/model_' + str(epoch + 1) + '.pth'
        torch.save(model.state_dict(), model_file_name)

        with open(args.savedir + 'acc_' + str(epoch) + '.txt', 'w') as log:
            log.write("\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f" % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val))
            log.write('\n')
            log.write('Per Class Training Acc: ' + str(per_class_acc_tr))
            log.write('\n')
            log.write('Per Class Validation Acc: ' + str(per_class_acc_val))
            log.write('\n')
            log.write('Per Class Training mIOU: ' + str(per_class_iu_tr))
            log.write('\n')
            log.write('Per Class Validation mIOU: ' + str(per_class_iu_val))

        logger.write("\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.7f" % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr))
        logger.flush()
        print("Epoch : " + str(epoch) + ' Details')
        print("\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f" % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val))
    logger.close()
Example #17
        "F_beta": [],
        "MAE": []
    }
}
bests = {"F_beta_tr": 0., "F_beta_val": 0., "MAE_tr": 1., "MAE_val": 1.}

logger.info('Data statistics:')
logger.info("mean: [%.5f, %.5f, %.5f], std: [%.5f, %.5f, %.5f]" %
            (*data['mean'], *data['std']))

# compose the data with transforms

trainTransform = myTransforms.Compose([
    # myTransforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    # myTransforms.RandomCropResize(int(7./224.*args.inWidth)),
    # myTransforms.RandomFlip(),
    myTransforms.ToTensor()
])
valTransform = myTransforms.Compose([
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    myTransforms.ToTensor()
])

train_set = myDataLoader.Dataset(data['trainIm'],
                                 data['trainDepth'],
                                 data['trainAnnot'],
                                 transform=trainTransform)
val_set = myDataLoader.Dataset(data['valIm'],
                               data['valDepth'],
Example #18
    def ifft(self):
        """
        Compute the Inverse FFT
        """
        return Transforms.ifft(self)
Example #19
def trainValidateSegmentation(args):
    # check if processed data file exists or not
    if not os.path.isfile(args.cached_data_file):
        dataLoader = ld.LoadData(args.data_dir, args.classes,
                                 args.cached_data_file)
        if dataLoader is None:
            print('Error while processing the data. Please check')
            exit(-1)
        data = dataLoader.processData()
    else:
        data = pickle.load(open(args.cached_data_file, "rb"))

    if args.modelType == 'C1':
        model = net.ResNetC1(args.classes)
    elif args.modelType == 'D1':
        model = net.ResNetD1(args.classes)
    else:
        print('Please select the correct model. Exiting!!')
        exit(-1)

    args.savedir = args.savedir + args.modelType + '/'

    if args.onGPU:
        model = model.cuda()

    # create the directory if it does not exist
    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    if args.visualizeNet:
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))

        if args.onGPU:
            x = x.cuda()

        y = model.forward(x)
        g = viz.make_dot(y)
        g.render(args.savedir + '/model.png', view=False)

    n_param = sum([np.prod(param.size()) for param in model.parameters()])
    print('Network parameters: ' + str(n_param))

    # define optimization criteria
    print('Weights to handle class-imbalance')
    weight = torch.from_numpy(
        data['classWeights'])  # convert the numpy array to torch
    print(weight)
    if args.onGPU:
        weight = weight.cuda()

    criteria = CrossEntropyLoss2d(weight)  # weight

    if args.onGPU:
        criteria = criteria.cuda()

    trainDatasetNoZoom = myTransforms.Compose([
        # myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.RandomCropResize(20),
        myTransforms.RandomHorizontalFlip(),
        myTransforms.ToTensor(args.scaleIn)
    ])

    trainDatasetWithZoom = myTransforms.Compose([
        # myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.Zoom(512, 512),
        myTransforms.RandomCropResize(20),
        myTransforms.RandomHorizontalFlip(),
        myTransforms.ToTensor(args.scaleIn)
    ])

    valDataset = myTransforms.Compose([
        # myTransforms.Normalize(mean=data['mean'], std=data['std']),
        myTransforms.ToTensor(args.scaleIn)
    ])

    trainLoaderNoZoom = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDatasetNoZoom),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    trainLoaderWithZoom = torch.utils.data.DataLoader(
        myDataLoader.MyDataset(data['trainIm'],
                               data['trainAnnot'],
                               transform=trainDatasetWithZoom),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)

    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        data['valIm'], data['valAnnot'], transform=valDataset),
                                            batch_size=args.batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers,
                                            pin_memory=True)

    # define the optimizer
    # optimizer = torch.optim.Adam(model.parameters(), args.lr, (0.9, 0.999), eps=1e-08, weight_decay=2e-4)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4)

    if args.onGPU:
        cudnn.benchmark = True

    start_epoch = 0

    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    logFileLoc = args.savedir + os.sep + args.logFile
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
    logger.write("Parameters: %s" % (str(n_param)))
    logger.write(
        "\n%s\t%s\t%s\t%s\t%s\t" %
        ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val)'))
    logger.flush()

    #lr scheduler
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.step_loss,
                                                gamma=0.1)

    for epoch in range(start_epoch, args.max_epochs):
        scheduler.step(epoch)

        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']

        # run at zoomed images first
        train(args, trainLoaderWithZoom, model, criteria, optimizer, epoch)
        lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr = train(
            args, trainLoaderNoZoom, model, criteria, optimizer, epoch)
        # evaluate on validation set
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = val(
            args, valLoader, model, criteria)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': str(model),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lossTr': lossTr,
                'lossVal': lossVal,
                'iouTr': mIOU_tr,
                'iouVal': mIOU_val,
            }, args.savedir + '/checkpoint.pth.tar')

        # save the model also
        model_file_name = args.savedir + '/model_' + str(epoch + 1) + '.pth'
        torch.save(model.state_dict(), model_file_name)

        with open(args.savedir + 'acc_' + str(epoch) + '.txt', 'w') as log:
            log.write(
                "\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f"
                % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val))
            log.write('\n')
            log.write('Per Class Training Acc: ' + str(per_class_acc_tr))
            log.write('\n')
            log.write('Per Class Validation Acc: ' + str(per_class_acc_val))
            log.write('\n')
            log.write('Per Class Training mIOU: ' + str(per_class_iu_tr))
            log.write('\n')
            log.write('Per Class Validation mIOU: ' + str(per_class_iu_val))

        logger.write("\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f" %
                     (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr))
        logger.flush()
        print("Epoch : " + str(epoch) + ' Details')
        print(
            "\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f"
            % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val))

    logger.close()
Example #20
                       dtype=np.float32)

# load the model
model = BiSalNet()
model.eval()

if args.onGPU and torch.cuda.device_count() > 1:
    # model = torch.nn.DataParallel(model)
    model = DataParallelModel(model)
if args.onGPU:
    model = model.cuda()

# compose the data with transforms
valDataset = myTransforms.Compose([
    myTransforms.Normalize(mean=data['mean'], std=data['std']),
    myTransforms.Scale(args.inWidth, args.inHeight),
    myTransforms.ToTensor()
])
# create the validation data loader
valLoader = torch.utils.data.DataLoader(myDataLoader.Dataset(
    data['valIm'], data['valAnnot'], transform=valDataset),
                                        batch_size=args.batch_size,
                                        shuffle=False,
                                        num_workers=args.num_workers,
                                        pin_memory=args.onGPU)

if os.path.isfile(args.resume):
    print("=> loading checkpoint '{}'".format(args.resume))
    model.load_state_dict(torch.load(args.resume)["state_dict"])
else:
Example #21
def trainValSegmentation(args):
    if not os.path.isfile(args.cached_data_file):
        dataLoader = ld.LoadData(args.data_dir, args.classes, args.attrClasses,
                                 args.cached_data_file)
        if dataLoader is None:
            print("Error while caching the data.")
            exit(-1)
        data = dataLoader.processData()
    else:
        print("loading cached data.")
        data = pickle.load(open(args.cached_data_file, 'rb'))
    # only unet for segmentation now.
    # model= unet.UNet(args.classes)
    # model = r18unet.ResNetUNet(args.classes)
    model = mobileunet.MobileUNet(args.classes)
    print("UNet done...")
    # if args.onGPU == True:
    model = model.cuda()
    # devices_ids=[2,3], device_ids=range(2)
    # device = torch.device('cuda:' + str(devices_ids[0]))
    # model = model.to(device)
    if args.visNet:
        x = Variable(torch.randn(1, 3, args.inwidth, args.inheight))
        if args.onGPU:
            x = x.cuda()
        print("before forward...")
        y = model.forward(x)
        print("after forward...")
        g = viz.make_dot(y)
        # g1 = viz.make_dot(y1)
        g.render(args.save_dir + '/model', view=False)
    model = torch.nn.DataParallel(model)
    n_param = sum([np.prod(param.size()) for param in model.parameters()])
    print('network parameters: ' + str(n_param))

    #define optimization criteria
    weight = torch.from_numpy(data['classWeights'])
    print(weight)
    if args.onGPU:
        weight = weight.cuda()
    criteria = CrossEntropyLoss2d(weight)
    # if args.onGPU == True:
    # 	criteria = criteria.cuda()

    trainDatasetNoZoom = myTransforms.Compose([
        myTransforms.RandomCropResize(args.inwidth, args.inheight),
        # myTransforms.RandomHorizontalFlip(),
        myTransforms.ToTensor(args.scaleIn)
    ])
    trainDatasetWithZoom = myTransforms.Compose([
        # myTransforms.Zoom(512,512),
        myTransforms.RandomCropResize(args.inwidth, args.inheight),
        myTransforms.RandomHorizontalFlip(),
        myTransforms.ToTensor(args.scaleIn)
    ])
    valDataset = myTransforms.Compose([
        myTransforms.RandomCropResize(args.inwidth, args.inheight),
        myTransforms.ToTensor(args.scaleIn)
    ])
    trainLoaderNoZoom = torch.utils.data.DataLoader(
        ld.MyDataset(data['trainIm'],
                     data['trainAnnot'],
                     transform=trainDatasetNoZoom),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)
    trainLoaderWithZoom = torch.utils.data.DataLoader(
        ld.MyDataset(data['trainIm'],
                     data['trainAnnot'],
                     transform=trainDatasetWithZoom),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True)
    valLoader = torch.utils.data.DataLoader(ld.MyDataset(data['valIm'],
                                                         data['valAnnot'],
                                                         transform=valDataset),
                                            batch_size=args.batch_size_val,
                                            shuffle=True,
                                            num_workers=args.num_workers,
                                            pin_memory=True)

    #define the optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr, (0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=2e-4)
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.99, weight_decay=5e-4)
    # optimizer = torch.optim.SGD([
    #        {'params': [param for name, param in model.named_parameters() if name[-4:] == 'bias'],
    #         'lr': 2 * args.lr},
    #        {'params': [param for name, param in model.named_parameters() if name[-4:] != 'bias'],
    #         'lr': args.lr, 'weight_decay': 5e-4}
    #    ], momentum=0.99)

    if args.onGPU:
        cudnn.benchmark = True
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch{})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resumeLoc))

    logfileLoc = args.save_dir + os.sep + args.logFile
    print(logfileLoc)
    if os.path.isfile(logfileLoc):
        logger = open(logfileLoc, 'a')
        logger.write("parameters: %s" % (str(n_param)))
        logger.write("\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t" %
                     ('Epoch', 'Loss(Tr)', 'Loss(val)', 'Overall acc(Tr)',
                      'Overall acc(val)', 'mIOU (tr)', 'mIOU (val'))
        logger.flush()
    else:
        logger = open(logfileLoc, 'w')
        logger.write("Parameters: %s" % (str(n_param)))
        logger.write("\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t" %
                     ('Epoch', 'Loss(Tr)', 'Loss(val)', 'Overall acc(Tr)',
                      'Overall acc(val)', 'mIOU (tr)', 'mIOU (val'))
        logger.flush()

    #lr scheduler
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[30, 60, 90],
                                                     gamma=0.1)
    best_model_acc = 0
    for epoch in range(start_epoch, args.max_epochs):
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        # train(args,trainLoaderWithZoom,model,criteria,optimizer,epoch)
        lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr = train(
            args, trainLoaderNoZoom, model, criteria, optimizer, epoch)
        # print(per_class_acc_tr,per_class_iu_tr)
        lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val = val(
            args, valLoader, model, criteria)

        #save_checkpoint
        torch.save(
            {
                'epoch': epoch + 1,
                'arch': str(model),
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lossTr': lossTr,
                'lossVal': lossVal,
                'iouTr': mIOU_tr,
                'iouVal': mIOU_val,
            }, args.save_dir + '/checkpoint.pth.tar')

        #save model also
        # if overall_acc_val > best_model_acc:
        # 	best_model_acc = overall_acc_val
        model_file_name = args.save_dir + '/model_' + str(epoch + 1) + '.pth'
        torch.save(model.state_dict(), model_file_name)
        with open('../acc/acc_' + str(epoch) + '.txt', 'w') as log:
            log.write(
                "\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f"
                % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val))
            log.write('\n')
            log.write('Per Class Training Acc: ' + str(per_class_acc_tr))
            log.write('\n')
            log.write('Per Class Validation Acc: ' + str(per_class_acc_val))
            log.write('\n')
            log.write('Per Class Training mIOU: ' + str(per_class_iu_tr))
            log.write('\n')
            log.write('Per Class Validation mIOU: ' + str(per_class_iu_val))

        logger.write(
            "\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.6f" %
            (epoch, lossTr, lossVal, overall_acc_tr, overall_acc_val, mIOU_tr,
             mIOU_val, lr))
        logger.flush()
        print("Epoch : " + str(epoch) + ' Details')
        print(
            "\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t Train acc = %.4f\t Val acc = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f"
            % (epoch, lossTr, lossVal, overall_acc_tr, overall_acc_val,
               mIOU_tr, mIOU_val))

    logger.close()
Example #22
    def spectrum(self):
        """
        Compute the spectrum using an FFT
        Returns an instance of Spectrum
        """
        return Transforms.fft(self)
Example #23
    def cqt(self):
        """
        Compute the Constant Q Transform (CQT)
        """
        return Transforms.cqt(self)
Example #24
# Create Camera (very basic for now)
cam = Sh.Camera()
# Create Renderer Object: Renderer(Camera, Resolution)
rend = Rd.Renderer(cam, RESOLUTION)

if False:
    # Create 4 points: Point(x, y, z, (R, G, B))
    p1 = Sh.Point(-2, -2, 4, (0, 255, 255))
    p2 = Sh.Point(4, 0.1, 4, (255, 255, 0))
    p3 = Sh.Point(0.1, 4, 4, (255, 0, 0))
    p4 = Sh.Point(-0.1, -0.1, 0, (100, 100, 100))
    # Create a Tetrahedron
    TH1 = Sh.Tetra(p1, p2, p3, p4)
    # Create a Transformation Set
    TS1 = Tf.Transform()
    # Add several transformations to the set
    TS1.AddTransf(Tf.Rotation(130, 3))
    TS1.AddTransf(Tf.Translation(2, 2))
    TS1.AddTransf(Tf.Translation(1, 3))
    TS1.AddTransf(Tf.Translation(1, 1))
    # Create a new Tetrahedron by applying the
    # transformation set to the first tetrahedron
    TH2 = TH1.ApplyTransf(TS1)
    # Add the Tetrahedra to the Renderer
    rend.AddObject(TH1)
    rend.AddObject(TH2)
else:
    # Load a mesh
    TH1 = Sh.Mesh('boltmesh', (200, 20, 50))
    # Add the mesh to the Renderer
Example #25
# The data shows a normal distribution, so we're going to use a
# logistic mapping function.

# This time, load the entire dataset into memory.
word_vecs = h5f['word_vecs'][:]

# Calculate the mean using a sample of the vectors.
center = np.mean(word_vecs[0:sample_size].flatten())

# Verify that the mean is roughly zero.
assert abs(center) < 0.01

# Learn the parameters for the logistic mapping function.
# The mapping function will saturate at 8 standard deviations, and will convert
# the values to 8-bit integers.
tf = Transforms.Transforms(np.uint8)

standard_deviations = 8
tf.learn_logistic(word_vecs, num_std=standard_deviations)

# Apply logistic mapping to the dataset vectors.
word_vecs_int = tf.apply_logistic(word_vecs)

# Load the query vectors into memory and map the values.
query_vecs = h5f['query_vecs'][:]

# Apply logistic mapping to the query vectors.
query_vecs_int = tf.apply_logistic(query_vecs)

# Take a small subset of the dataset and plot a histogram of the values.
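The learn_logistic/apply_logistic pair belongs to the project's Transforms class and its exact parametrization is not shown. A self-contained sketch of the same idea, assuming a logistic squash that saturates near num_std standard deviations before quantizing to the target integer type:

import numpy as np

class LogisticQuantizerSketch:
    def __init__(self, dtype=np.uint8):
        self.dtype = dtype
        self.scale = None

    def learn_logistic(self, data, num_std=8):
        # pick the squashing scale so the curve saturates around
        # num_std standard deviations from the (roughly zero) mean
        self.scale = num_std * np.std(data)

    def apply_logistic(self, data):
        # squash into (0, 1), then stretch over the target integer range
        squashed = 1.0 / (1.0 + np.exp(-8.0 * data / self.scale))
        return (squashed * np.iinfo(self.dtype).max).astype(self.dtype)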
Example #26
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Read an mp3 file and plot out its pitch')
    parser.add_argument('-i',
                        dest='input',
                        type=str,
                        help='Input file path',
                        default='')
    parser.add_argument('-n',
                        dest='normalize',
                        action='store_true',
                        help='Normalize input values',
                        default=False)
    parser.add_argument(
        '-t',
        dest='transform',
        type=str,
        help='Use different transforms on the input audio signal',
        default='stft')
    parser.add_argument(
        '-s',
        dest='sample',
        type=int,
        help=
        'Sampling rate in Hz to use on the input audio signal while transforming',
        default=100)
    parser.add_argument(
        '-w',
        dest='window',
        type=float,
        help='Sampling window in s to use on the input audio signal for stft',
        default=0.05)
    options = parser.parse_args()

    # Error check
    if options.input == '':
        print("No input given. BYE!\n")
        return 1
    elif not os.path.isfile(options.input):
        print(f"Given input path {options.input} does not exist!")
        return 2

    # Read input file into frame rate and data
    try:
        inSignal = MP3.read(options.input, options.normalize)
    except Exception:
        print("Reading MP3 failed")
        return 3

    figures = []
    # Plot the data for quick visualization
    if options.transform == 'none':
        for i in range(0, inSignal.channels):
            if i == 0:
                figures.append(
                    bkfigure(plot_width=1200,
                             plot_height=600,
                             x_axis_label='Time',
                             y_axis_label='Amp'))
            else:
                figures.append(
                    bkfigure(plot_width=1200,
                             plot_height=600,
                             x_axis_label='Time',
                             y_axis_label='Amp',
                             x_range=figures[0].x_range,
                             y_range=figures[0].y_range))
            figures[i].line(inSignal.time, inSignal.audioData[:, i])
    elif options.transform == 'stft':
        # STFT over the signal
        fSignal = Transforms.STFT(inSignal,
                                  windowEvery=1 / options.sample,
                                  windowLength=options.window)
        for i in range(0, inSignal.channels):
            if i == 0:
                figures.append(
                    bkfigure(plot_width=1200,
                             plot_height=400,
                             x_axis_label='Time',
                             y_axis_label='Frequency'))
            else:
                figures.append(
                    bkfigure(plot_width=1200,
                             plot_height=400,
                             x_axis_label='Time',
                             y_axis_label='Frequency',
                             x_range=figures[0].x_range,
                             y_range=figures[0].y_range))
            channelAmp = np.max(fSignal.audioData[:, :, i])
            figures[i].image(image=[fSignal.audioData[:, :, i]],
                             x=0,
                             y=0,
                             dw=fSignal.time[-1],
                             dh=fSignal.dimensionAxes[0][-1],
                             color_mapper=LinearColorMapper(high=channelAmp,
                                                            low=0,
                                                            palette=Inferno11))
    else:
        print("Unrecognized transform given!")
        return 4

    bkshow(bkcolumn(*figures))
    return 0
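Transforms.STFT is project code; windowEvery and windowLength are given in seconds. A rough scipy-based equivalent (a sketch, not the project's implementation), mapping those parameters onto scipy.signal.stft's segment length and hop:

import numpy as np
from scipy.signal import stft

def stft_sketch(samples, rate, windowEvery, windowLength):
    nperseg = int(windowLength * rate)       # samples per window
    hop = max(1, int(windowEvery * rate))    # samples between window starts
    freqs, times, Z = stft(samples, fs=rate, nperseg=nperseg,
                           noverlap=nperseg - hop)
    return freqs, times, np.abs(Z)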
Example #27
if my_args.output is not None:
    plotpath = my_args.output + "/"

# DATA_PATH = "./data/pythia/"
# filename = "reco_simu_Zdd.csv"
#
# BCG_PATH = './data/'
# Filename_background = "reco_0.csv"

h = Hits.Hits(filename)
ev = Hits.Event(h)  #, 11)

# h.drawAllEvents()
# Combine events (background, or several events)
# ev.combineEvents([Hits.Event(h_background)])
# ev.combineEvents([Hits.Event(h, 12), Hits.Event(h, 11)])

ev.drawEvent3D(plotName=plotpath + "3D_Zdd.pdf")
# ev.drawEventXY()#plotName=plotpath+"3tracks_XY.pdf")
# ev.drawEventXZ()
# ev.drawEventYZ()
d = ev.data
tr = Transforms.Transforms(ev)

H, xedges, yedges = tr.HoughTransform_phi(numpoints=200,
                                          binx=200,
                                          biny=50,
                                          plotName=plotpath +
                                          "HT_Zdd_maxima.pdf")
tr.plotConformalTransform(plotpath + "CT_Zdd.pdf")
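HoughTransform_phi is part of the project's Transforms class and its implementation is not shown. As a sketch of the underlying idea, assuming a straight-line Hough accumulator in (phi, r) space where each hit votes along r = x*cos(phi) + y*sin(phi):

import numpy as np

def hough_phi_sketch(x, y, numpoints=200, binx=200, biny=50):
    # each hit contributes one (phi, r) vote per sampled phi;
    # peaks in H correspond to candidate straight tracks
    phi = np.linspace(0.0, np.pi, numpoints)
    r = np.outer(x, np.cos(phi)) + np.outer(y, np.sin(phi))
    H, xedges, yedges = np.histogram2d(np.tile(phi, len(x)), r.ravel(),
                                       bins=(binx, biny))
    return H, xedges, yedges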