Example #1
    def __init__(self, train, win, path=None, net_model='SNN', Normalize=True):
        super(STORM_DVS, self).__init__()
        data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
        )
        self.image_size = data_generation_parameters['image_size']
        self.downsample_rate = data_generation_parameters['downsample_rate']
        self.net_model = net_model

        self.path = path
        self.train = train
        self.win = win
        self.DVS_image_size = int(self.image_size / self.downsample_rate)

        if self.train:
            self.data_path = self.path + '/train'
        else:
            self.data_path = self.path + '/valid'

        self.files = os.listdir(self.data_path)

        DVS_file_dir = os.path.join(self.data_path, '1.txt')
        # np.loadtxt opens the file itself, so no explicit open()/close() is needed;
        # np.float was removed in NumPy 1.24, the builtin float is equivalent here.
        raw_data = np.loadtxt(DVS_file_dir, dtype=float, delimiter=' ')
        raw_data = raw_data.astype(int)
        self.time_step = raw_data[:, 0].max() + 1
        self.dt = win / self.time_step
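The loader above expects each sample (e.g. '1.txt') to be a space-delimited text file whose first column is an integer time-bin index; time_step is inferred as max(index) + 1. A minimal sketch for writing such a dummy file, assuming this layout (the x, y and polarity columns are an assumption, since only column 0 is used above):

import numpy as np

# Hypothetical dummy event file: columns [time_bin, x, y, polarity].
# Only the first column (time_bin) is required by the loader above.
rng = np.random.default_rng(0)
num_events = 1000
events = np.stack([
    rng.integers(0, 100, num_events),  # time-bin index -> time_step = 100
    rng.integers(0, 64, num_events),   # x coordinate (assumed)
    rng.integers(0, 64, num_events),   # y coordinate (assumed)
    rng.integers(0, 2, num_events),    # polarity (assumed)
], axis=1)
np.savetxt('1.txt', events, fmt='%d', delimiter=' ')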
Example #2
 def __init__(self):
     super(MSE_and_L1_loss, self).__init__()
     self.L1_loss = nn.L1Loss()
     self.MSE_loss = nn.MSELoss()
     data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
     )
     self.fluorophore_density = data_generation_parameters[
         'fluorophore_density']
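MSE_and_L1_loss is used as a drop-in criterion in Example #8. Its forward method is not shown here; a minimal usage sketch, assuming the (predict, GT) signature that the forward methods in Examples #5 and #6 suggest:

import torch

# Hypothetical usage; mirrors `criterion = MSE_and_L1_loss()` in Example #8.
criterion = MSE_and_L1_loss()
predict = torch.rand(4, 1, 64, 64, requires_grad=True)
GT = torch.rand(4, 1, 64, 64)
loss = criterion(predict, GT)  # scalar tensor, assuming a standard loss forward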
Example #3
def weighted_MSE_loss(predict, GT):
    # predict = predict/predict.max()
    data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras()
    fluorophore_density = data_generation_parameters['fluorophore_density']
    loss = torch.mean(torch.pow((predict - GT) * GT, 2) / fluorophore_density + torch.pow((predict - GT) * (1 - GT), 2))
    L1_loss = nn.L1Loss()
    loss += L1_loss(predict, torch.zeros_like(predict))
    return loss
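A quick sanity check of weighted_MSE_loss on random tensors; a sketch that assumes load_configuration_parameters is importable and the configuration defines 'fluorophore_density':

import torch

# Sparse binary ground truth emphasises the density-weighted first term.
predict = torch.rand(2, 1, 64, 64, requires_grad=True)
GT = (torch.rand(2, 1, 64, 64) > 0.95).float()
loss = weighted_MSE_loss(predict, GT)
loss.backward()
print(loss.item(), predict.grad.abs().mean().item())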
Example #4
    def __init__(self, probability, image_size=None):  # unit: nm
        """
        As well as the always required :attr:`probability` parameter, the
        constructor requires a :attr:`percentage_area` to control the area
        of the image to crop in terms of its percentage of the original image,
        and a :attr:`centre` parameter toggle whether a random area or the
        centre of the images should be cropped.

        :param probability: Controls the probability that the operation is
         performed when it is invoked in the pipeline.
        :type probability: Float
        """
        data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras()
        config = ConfigParser()
        config.read('../configuration.ini')
        SourceFileDirectory = data_generation_parameters['SourceFileDirectory']
        self.Magnification = data_generation_parameters['Magnification']
        self.PixelSizeOfCCD = data_generation_parameters['PixelSizeOfCCD']
        self.EmWaveLength = data_generation_parameters['EmWaveLength']
        self.NA = data_generation_parameters['NA']
        self.NumPhase = data_generation_parameters['NumPhase']
        self.SNR = data_generation_parameters['SNR']
        self.photon_num = data_generation_parameters['photon_num']
        # self.f_cutoff = 1 / 0.61 * self.NA / self.EmWaveLength  # Rayleigh-criterion cutoff (alternative, not used)
        self.f_cutoff = 2 * self.NA / self.EmWaveLength  # The incoherent (OTF) cutoff frequency

        if image_size is None:
            self.image_size = data_generation_parameters['image_size']
        else:
            self.image_size = image_size
        self.pattern_frequency_ratio = data_generation_parameters['pattern_frequency_ratio']
        self.data_num = data_generation_parameters['data_num']
        self.PixelSize = self.PixelSizeOfCCD / self.Magnification
        self.delta_x = self.PixelSize  # spatial-domain pixel spacing along x/y, in m
        self.delta_y = self.PixelSize
        self.delta_fx = 1 / self.image_size / self.delta_x  # frequency-domain spacing along x/y, in m^-1
        self.delta_fy = 1 / self.image_size / self.delta_y

        if self.PixelSize > 0.61 * self.EmWaveLength / self.NA / 4:
            self.SR_image_size = self.image_size * 2
            self.SR_PixelSize = self.PixelSizeOfCCD / self.Magnification / 2
            self.upsample = True
            self.xx_upsmaple, self.yy_upsmaple, self.fx_upsmaple, self.fy_upsmaple = self.GridGenerate(up_sample=self.upsample)
            self.f_upsample = pow((self.fx_upsmaple ** 2 + self.fy_upsmaple ** 2), 1 / 2)
            self.OTF_upsmaple = self.OTF_form(fc_ratio=1,upsample = True)
        else:
            self.upsample = False

        # self.upsample = True

        self.xx, self.yy, self.fx, self.fy = self.GridGenerate(self.image_size)

        self.f = pow((self.fx ** 2 + self.fy ** 2), 1 / 2)  # The spatial frequency fr = sqrt(fx^2 + fy^2)

        self.OTF = self.OTF_form(fc_ratio=1)
        self.CTF = self.CTF_form(fc_ratio=1)
        Operations.Operation.__init__(self, probability)
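GridGenerate and OTF_form are not shown in this snippet. As a rough, self-contained illustration of the quantities the constructor sets up (frequency spacing delta_f = 1 / (image_size * delta_x) and the incoherent cutoff 2 * NA / EmWaveLength), here is a stand-in sketch; the binary support mask is only an approximation, not the project's OTF:

import numpy as np

def example_grid_and_otf_support(image_size, pixel_size, NA, em_wavelength):
    # Frequency axes with spacing 1 / (image_size * pixel_size).
    fx = np.fft.fftshift(np.fft.fftfreq(image_size, d=pixel_size))
    fxx, fyy = np.meshgrid(fx, fx)
    f = np.sqrt(fxx ** 2 + fyy ** 2)
    f_cutoff = 2 * NA / em_wavelength  # incoherent (OTF) cutoff
    otf_support = (f <= f_cutoff).astype(float)  # binary pass-band, not a true OTF shape
    return fxx, fyy, otf_support

# Illustrative values only: 64 px, 65 nm camera-plane pixels, NA 1.4, 670 nm emission.
fxx, fyy, support = example_grid_and_otf_support(64, 65e-9, 1.4, 670e-9)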
Example #5
    def forward(self, predict, GT):
        mask = self.simulator.batch_image_OTF_filter(GT)
        data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
        )
        fluorophore_density = data_generation_parameters['fluorophore_density']
        loss = torch.mean(
            torch.pow((predict - GT) * mask, 2) / fluorophore_density +
            torch.pow((predict - GT) * (1 - mask), 2))

        return loss
Example #6
    def forward(self, predict, GT):
        mask = self.simulator.batch_image_OTF_filter(GT)
        loc = mask > 0
        mask[loc] = 1
        L1_loss = nn.L1Loss()
        data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras()
        fluorophore_density = data_generation_parameters['fluorophore_density']
        loss = torch.mean(
            torch.pow((predict - GT) * mask, 2) / fluorophore_density + torch.pow((predict - GT) * (1 - mask), 2))

        loss += L1_loss(predict, torch.zeros_like(predict))

        return loss
Example #7
            label[label_loc[i, 0], label_loc[i, 1]] = 1

        if self.net_model != 'SNN':
            data = data.mean(axis=3)

        # plot to help debug
        eventflow_sum = data.sum(axis=3).squeeze()
        plt.subplot(121), plt.imshow(eventflow_sum)
        plt.subplot(122), plt.imshow(label * 255)
        plt.show()

        return torch.from_numpy(data), torch.from_numpy(label)


if __name__ == '__main__':
    data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
    )
    # config = ConfigParser()
    # config.read('../configuration.ini')
    output_directory = data_generation_parameters['output_directory']
    output_directory = '/data/zh/DVS_STORM_sample_data/'

    STORM_DVS_dataset = STORM_DVS(train=True, win=100, path=output_directory)
    SIM_train_dataloader = DataLoader(STORM_DVS_dataset,
                                      batch_size=1,
                                      shuffle=True)
    # STORM_DVS_dataset[0]
    for i, (data, label) in enumerate(SIM_train_dataloader):
        print(i)

        if i > 0:
            break
Example #8
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=48, metavar='N',
                        help='input batch size for training (default: 48)')
    parser.add_argument('--test-batch-size', type=int, default=48, metavar='N',
                        help='input batch size for testing (default: 48)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-net', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)


    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    writer = SummaryWriter('./summaries/cifar10')

    data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras()
    data_path = data_generation_parameters['output_directory']

    train_dataset = STORM_DVS(train=True,  win=100, path=data_path, net_model='ANN')
    test_dataset = STORM_DVS(train=False,  win=100, path=data_path, net_model='ANN')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'
    # With CUDA_VISIBLE_DEVICES='2,3' the visible GPUs are remapped to
    # cuda:0 and cuda:1, so index 6 would be invalid; use cuda:0 here.
    device = torch.device("cuda:0" if use_cuda else "cpu")
    net = ANN_model.Unet_4x_ANN()
    # net = nn.DataParallel(net, device_ids=[2, 3])
    net = net.to(device)
    net.apply(init_weights)
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    # criterion = psf_loss(device)
    # criterion = psf_weighted_loss()
    criterion = MSE_and_L1_loss()
    # criterion = nn.CrossEntropyLoss()
    save_id = uuid.uuid4()
    for epoch in range(1, args.epochs + 1):
        train(args, net, device, train_loader, optimizer, epoch, writer, criterion, save_id)
        test(args, net, device, test_loader, epoch, writer, criterion)

    writer.close()
Example #9
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='input batch size for training (default: 1)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1,
                        metavar='N',
                        help='input batch size for testing (default: 1)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-4,
                        metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda:0" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    writer = SummaryWriter('./summaries/cifar10')

    data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
    )
    data_path = data_generation_parameters['output_directory']

    train_dataset = STORM_DVS(train=True,
                              win=100,
                              path=data_path,
                              net_model='ANN',
                              Normalize=True)
    test_dataset = STORM_DVS(train=False,
                             win=100,
                             path=data_path,
                             net_model='ANN',
                             Normalize=True)

    # train_dataset = STORM_DVS(train=True,  win=100, path=data_path, net_model='SNN', Normalize = 'True')
    # test_dataset = STORM_DVS(train=False,  win=100, path=data_path, net_model='SNN', Normalize = 'True')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    # data_path = "E:\PHD\DVS_STORM_SOFI\DVS\DVS_data/"
    # net = model.enconder_decoder_4x_SNN().to(device)
    # net = ANN_model.resnet18().to(device)
    net = ANN_model.Unet_4x_ANN().to(device)
    # net = SNN_model.Unet_8x_SNN().to(device)

    state = torch.load('./checkpoint/6c18065b-f314-4a09-b809-5a0bb14be388' +
                       'Decoder_4x_SNN' + '.t7')
    net.load_state_dict(state['net'])

    # from collections import OrderedDict
    # def multi_GPU_net_load(model, check):
    #     new_state = OrderedDict()
    #     for layer_multi_GPU, name in state['net'].items():
    #         layer_single_gpu = layer_multi_GPU[7:]
    #         new_state[layer_single_gpu] = name
    #     model.load_state_dict(new_state)
    #     return model
    #
    # net = multi_GPU_net_load(net,state)

    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = data.to(device), target.to(device)
        data = data.float()
        target = target.float()
        output = net(data)
        common_utils.plot_single_tensor_image(
            data[0, :, :, :].squeeze())  # for ANN
        # common_utils.plot_single_tensor_image(data[:, :, :, :, 0].squeeze()) # for SNN
        common_utils.plot_single_tensor_image(output.squeeze())
        common_utils.plot_single_tensor_image(target.squeeze())

        if batch_idx > 0:
            break
Example #10
    def __init__(
        self,
        train=True,
        pos_thres=0.09,
        neg_thres=0.05,
        sigma_thres=0.03,
        cutoff_hz=0,
        leak_rate_hz=0.1,
        refractory_period_s=0,  # todo not yet modeled
        shot_noise_rate_hz=0.001,  # rate in hz of temporal noise events
        #  seed=42,
        seed=0,
        output_folder: str = None,
        dvs_h5: str = None,
        dvs_aedat2: str = None,
        dvs_text: str = True,
        # change as you like to see 'baseLogFrame',
        # 'lpLogFrame', 'diff_frame'
        show_dvs_model_state: str = None
        # dvs_rosbag=None
    ):  # unit: nm
        """
        As well as the always required :attr:`probability` parameter, the
        constructor requires a :attr:`percentage_area` to control the area
        of the image to crop in terms of its percentage of the original image,
        and a :attr:`centre` parameter toggle whether a random area or the
        centre of the images should be cropped.

        :param probability: Controls the probability that the operation is
         performed when it is invoked in the pipeline.
        :type probability: Float
        """
        data_generation_parameters = load_configuration_parameters.load_data_generation_config_paras(
        )
        # config = ConfigParser()
        # config.read('../configuration.ini')
        output_directory = data_generation_parameters['output_directory']
        self.Magnification = data_generation_parameters['Magnification']
        self.PixelSizeOfCCD = data_generation_parameters['PixelSizeOfCCD']
        self.EmWaveLength = data_generation_parameters['EmWaveLength']
        self.NA = data_generation_parameters['NA']
        self.image_size = data_generation_parameters['image_size']
        self.fluorophore_density = data_generation_parameters[
            'fluorophore_density']
        self.downsample_rate = data_generation_parameters['downsample_rate']
        self.parallel_frames = data_generation_parameters['parallel_frames']
        # self.image_size = data_generation_parameters['image_size']
        self.f_cutoff = 1 / 0.61 * self.NA / self.EmWaveLength  # cutoff frequency from the Rayleigh criterion, NA / (0.61 * lambda)

        self.PixelSize = self.PixelSizeOfCCD / self.Magnification / self.downsample_rate
        self.delta_x = self.PixelSize  # spatial-domain pixel spacing along x/y, in m
        self.delta_y = self.PixelSize
        self.delta_fx = 1 / self.image_size / self.delta_x  # frequency-domain spacing along x/y, in m^-1
        self.delta_fy = 1 / self.image_size / self.delta_y
        self.xx, self.yy, self.fx, self.fy = self.GridGenerate(
            grid_mode='real')

        self.f_grid = pow(
            (self.fx**2 + self.fy**2),
            1 / 2)  # The spatial frequency fr = sqrt(fx^2 + fy^2)

        self.OTF = self.OTF_form()
        self.OTF_padding = self.padding_OTF_generate()
        # self.CTF = self.CTF_form()

        self.pos_thres = pos_thres
        self.time_window = 100  # unit: us
        self.output_folder = output_directory

        self.num_events_total = 0

        self.sigma_thres = sigma_thres
        # initialized to scalar, later overwritten by random value array
        self.pos_thres = pos_thres
        # initialized to scalar, later overwritten by random value array
        self.neg_thres = neg_thres
        self.pos_thres_nominal = pos_thres
        self.neg_thres_nominal = neg_thres
        self.cutoff_hz = cutoff_hz
        self.leak_rate_hz = leak_rate_hz
        self.refractory_period_s = refractory_period_s
        self.shot_noise_rate_hz = shot_noise_rate_hz
        self.output_width = None
        self.output_height = None  # set on first frame
        self.show_input = show_dvs_model_state
        if seed > 0:
            np.random.seed(seed)

        self.dvs_h5 = dvs_h5
        self.dvs_aedat2 = dvs_aedat2
        self.dvs_text = dvs_text
        self.num_events_total = 0
        self.num_events_on = 0
        self.num_events_off = 0
        self.frame_counter = 0
        self.train = train
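For reference, a hedged instantiation sketch; the enclosing class is not named in the snippet, so DVSSimulator below is a hypothetical placeholder for whatever class owns this __init__:

# Hypothetical usage with the defaults listed above; only keyword
# arguments that appear in the constructor signature are passed.
sim = DVSSimulator(
    train=True,
    pos_thres=0.09,
    neg_thres=0.05,
    sigma_thres=0.03,
    cutoff_hz=0,
    leak_rate_hz=0.1,
    shot_noise_rate_hz=0.001,
    seed=0,
    dvs_text=True,
)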