Example #1
import numpy as np
from keras.engine.training import (_make_batches, _standardize_input_data,
                                   _standardize_sample_weights)


def time_delay_generator(x, y, delays, batch_size, weights=None, shuffle=True):
    '''A generator to make it easy to fit time-delay regression models,
    i.e. a model where the value of y depends on past values of x

    # Arguments
    x: input data, as a Numpy array
    y: targets, as a Numpy array or None for prediction generation
    delays: number of time-steps to include in model
    batch_size: number of samples per batch
    weights: Numpy array of weights for the samples
    shuffle: Whether or not to shuffle the data (set True for training)

    # Example
    If X_train has shape (1000, 200) and Y_train has shape (1000, 1):
    train_gen = time_delay_generator(X_train, Y_train, delays=10, batch_size=100)

    train_gen is a generator that yields:
    x_batch of shape (100, 10, 200), since each of the 100 samples includes the
    input data at the current and nine previous time steps
    y_batch of shape (100, 1)
    w_batch of shape (100,)

    '''

    if isinstance(delays, int):
        delays = list(range(delays))  # use lags 0 .. delays-1

    if not isinstance(x, list):
        x = [x]  # allow a single input array or a list of input arrays
    index_array = np.arange(x[0].shape[0])

    # Transpose orders that put the batch axis before the delay axis
    tlists = [[1, 0] + list(range(2, np.ndim(xx) + 1)) for xx in x]
    batches = _make_batches(x[0].shape[0], batch_size)
    while True:
        if shuffle:
            np.random.shuffle(index_array)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            # For each delay d, shift the sample indices back by d steps,
            # clipped to the valid range [0, n_samples - 1]
            batch_ids_delay = [
                np.minimum(np.maximum(0, batch_ids - d), x[0].shape[0] - 1)
                for d in delays
            ]
            # Gather a (delays, batch, ...) block per input, then transpose
            # each to (batch, delays, ...)
            x_batch = _standardize_input_data(
                [xx[np.asarray(batch_ids_delay), :].transpose(tt)
                 for xx, tt in zip(x, tlists)],
                ['x_batch' + str(i) for i in range(1, len(x) + 1)])
            if y is None:
                yield x_batch
            else:
                y_batch = _standardize_input_data(y[batch_ids, :], ['y_batch'])
                if weights is not None:
                    w_batch = weights[batch_ids, :][:, 0]
                else:
                    w_batch = np.ones(x_batch[0].shape[0])
                # Zero out samples too early to have a full delay history
                w_batch[batch_ids < delays[-1]] = 0.
                w_batch = _standardize_sample_weights(w_batch, ['w_batch'])
                yield (x_batch, y_batch, w_batch)
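
For context, a minimal training sketch (a sketch, not from the original source: the toy model and data are invented, and it assumes a Keras 2.0.x-era install where the private helpers above live in keras.engine.training):

import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense

X_train = np.random.randn(1000, 200).astype('float32')
Y_train = np.random.randn(1000, 1).astype('float32')

# Each sample the generator yields has shape (delays, features) = (10, 200)
model = Sequential([Flatten(input_shape=(10, 200)), Dense(1)])
model.compile(optimizer='adam', loss='mse')

train_gen = time_delay_generator(X_train, Y_train, delays=10, batch_size=100)
model.fit_generator(train_gen, steps_per_epoch=10, epochs=5)

fit_generator consumes the (x_batch, y_batch, w_batch) tuples directly, so the early samples with zeroed weights simply do not contribute to the loss.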
Example #2
File: keras_ops.py  Project: windcr/SRGAN
from keras import losses
from keras.engine.training import (_standardize_input_data,
                                   _standardize_sample_weights,
                                   _standardize_class_weights,
                                   _standardize_weights,
                                   _check_loss_and_target_compatibility)


def _standardize_user_data(model,
                           x,
                           y,
                           sample_weight=None,
                           class_weight=None,
                           check_batch_dim=True,
                           batch_size=None):
    if not hasattr(model, 'optimizer'):
        raise Exception('You must compile a model before training/testing.'
                        ' Use `model.compile(optimizer, loss)`.')

    output_shapes = []
    for output_shape, loss_fn in zip(model.internal_output_shapes,
                                     model.loss_functions):
        if loss_fn.__name__ == 'sparse_categorical_crossentropy':
            # Sparse targets are integer class indices: one value per sample
            output_shapes.append(output_shape[:-1] + (1, ))
        elif getattr(losses, loss_fn.__name__, None) is None:
            # Custom loss: the expected target shape cannot be inferred
            output_shapes.append(None)
        else:
            output_shapes.append(output_shape)
    x = _standardize_input_data(x,
                                model.input_names,
                                model.internal_input_shapes,
                                exception_prefix='model input')
    y = _standardize_input_data(y,
                                model.output_names,
                                output_shapes,
                                exception_prefix='model target')
    sample_weights = _standardize_sample_weights(sample_weight,
                                                 model.output_names)
    class_weights = _standardize_class_weights(class_weight,
                                               model.output_names)
    sample_weights = [
        _standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip(
            y, sample_weights, class_weights, model.sample_weight_modes)
    ]
    # We only need to comment out _check_array_lengths(x, y, sample_weights) on
    # the next line to let the model compile and train.
    # _check_array_lengths(x, y, sample_weights)

    _check_loss_and_target_compatibility(y, model.loss_functions,
                                         model.internal_output_shapes)
    if model.stateful and batch_size:
        if x[0].shape[0] % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(x[0].shape[0]) + ' samples')
    return x, y, sample_weights
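
A quick check of the sparse-label path above (a sketch: the toy classifier and data are invented for illustration). With sparse_categorical_crossentropy the target is one integer class index per sample, so the expected target shape replaces the last axis with 1:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

clf = Sequential([Dense(10, activation='softmax', input_shape=(20,))])
clf.compile(optimizer='sgd', loss='sparse_categorical_crossentropy')

x = np.random.randn(32, 20).astype('float32')
y = np.random.randint(0, 10, size=(32, 1))  # integer class indices, not one-hot

x_std, y_std, sample_weights = _standardize_user_data(clf, x, y)
print(y_std[0].shape)  # (32, 1): last axis replaced by 1 for the sparse loss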
Example #3
import numpy as np
from keras.engine.training import (_make_batches, _standardize_input_data,
                                   _standardize_sample_weights)


def time_delay_generator_jitter(x, y, delays, batch_size, weights=None,
                                shuffle=True, conv3d=False, jitter=True,
                                jitter_axes=[3, 4], max_jitter=1):
    '''A generator to make it easy to fit time-delay regression models,
    i.e. a model where the value of y depends on past values of x

    # Arguments
    x: input data, as a Numpy array
    y: targets, as a Numpy array or None for prediction generation
    delays: number of time-steps to include in model
    batch_size: number of samples per batch
    weights: Numpy array of weights for the samples
    shuffle: Whether or not to shuffle the data (set True for training)
    conv3d: If True, order the batch axes for 3-D convolution input
    jitter: Whether to randomly roll each batch along jitter_axes
    jitter_axes: axes of x_batch along which to apply the random roll
    max_jitter: maximum magnitude of the random roll, in array elements

    # Example
    If X_train has shape (1000, 200) and Y_train has shape (1000, 1):
    train_gen = time_delay_generator_jitter(X_train, Y_train, delays=10, batch_size=100)

    train_gen is a generator that yields:
    x_batch of shape (100, 10, 200), since each of the 100 samples includes the
    input data at the current and nine previous time steps
    y_batch of shape (100, 1)
    w_batch of shape (100,)

    '''
    index_array = np.arange(x.shape[0])
    if conv3d:
        # Put the delay axis after the channel axis (depth for 3-D convs)
        tlist = [1, 2, 0] + list(range(3, np.ndim(x) + 1))
    else:
        tlist = [1, 0] + list(range(2, np.ndim(x) + 1))
    batches = _make_batches(x.shape[0], batch_size)
    while True:
        if shuffle:
            np.random.shuffle(index_array)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            # One index array per delay, clamped at 0; batch_ids[0] keeps the
            # undelayed indices used below for targets and weights
            batch_ids = [np.maximum(0, batch_ids - d) for d in range(delays)]
            x_batch = x[np.asarray(batch_ids), :].transpose(tlist)
            if jitter:
                # Apply one random roll per axis to the whole batch
                for j in jitter_axes:
                    x_batch = np.roll(
                        x_batch, np.random.randint(-max_jitter, max_jitter + 1),
                        axis=j)
            x_batch = _standardize_input_data(x_batch, ['x_batch'])
            if y is None:
                yield x_batch
            else:
                y_batch = _standardize_input_data(y[batch_ids[0], :], ['y_batch'])
                if weights is not None:
                    w_batch = weights[batch_ids[0], :][:, 0]
                else:
                    w_batch = np.ones(x_batch[0].shape[0])
                # Zero out samples too early to have a full delay history
                w_batch[batch_ids[0] < delays] = 0.
                w_batch = _standardize_sample_weights(w_batch, ['w_batch'])
                yield (x_batch, y_batch, w_batch)
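
A quick usage sketch for the jitter variant (a sketch with made-up data; the shapes are chosen so that the default jitter_axes=[3, 4] point at the two spatial axes of the 5-D batch):

import numpy as np

X_train = np.random.randn(1000, 1, 32, 32).astype('float32')  # (time, chan, rows, cols)
Y_train = np.random.randn(1000, 1).astype('float32')

gen = time_delay_generator_jitter(X_train, Y_train, delays=10, batch_size=100)
x_batch, y_batch, w_batch = next(gen)
print(x_batch[0].shape)  # (100, 10, 1, 32, 32); rows/cols rolled by at most ±1

The example continues with a related class method from the same project that builds this kind of generator directly from experiment data: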
    def keras_generator(self, delays=7, batch_size=400, cell=0, scale=5,
                        flatten=True, center=None, crop_size=None, shuffle=True,
                        color_chan=False, log_transform_events=True,
                        correct_eye_pos=False, gaussian_filter=0):
        from keras.engine.training import _standardize_input_data, _make_batches, _standardize_sample_weights
        from scipy.ndimage import gaussian_filter1d
        
        if type(cell) is int:
            cell = [cell]

        if type(delays) is int:
            delays = range(delays)

        (stim, events, frame_numbers, weights, shifts) = self.vectorize_data(delays)

        evidx = np.where(events)[0]
        print(str(len(frame_numbers)) + ' Samples')
        print(str(len(evidx)) + ' Events')

        if correct_eye_pos:
            sh = stim.shape
            # Pad the stimulus so every recorded eye-position shift stays
            # inside the output array
            shift_stim_shape = (len(shifts),
                                sh[1] + 2 * np.maximum(self.min_max_shift[1][0], -self.min_max_shift[0][0]) + 3,
                                sh[2] + 2 * np.maximum(self.min_max_shift[1][1], -self.min_max_shift[0][1]) + 3)

            out_stim = np.zeros(shift_stim_shape, dtype='float32')

            # Re-center the shifts inside the padded stimulus
            shifts = shifts + [shift_stim_shape[1] // 2, shift_stim_shape[2] // 2]
            good_shift_locations = ~np.isnan(shifts[:, 0])
            # Zero the weights of samples whose shift is unknown at any delay
            for dd in delays:
                weights[np.minimum(np.where(np.isnan(shifts[:, 0]))[0] + dd, len(weights) - 1)] = 0

            for i in range(len(shifts)):
                if good_shift_locations[i]:
                    out_stim[i, -sh[1] // 2 + np.int32(shifts[i, 0]):np.int32(shifts[i, 0]) + sh[1] // 2,
                             -sh[2] // 2 + np.int32(shifts[i, 1]):np.int32(shifts[i, 1]) + sh[2] // 2] = stim[frame_numbers[i]]

            stim = out_stim
            frame_numbers_i = np.arange(len(frame_numbers))
        else:
            frame_numbers_i = frame_numbers

        if color_chan:
            stim = stim[:, None, :, :]

        if crop_size is not None and center is not None:
            stim = stim[:, (center[0] - crop_size // 2):(center[0] + crop_size // 2),
                        (center[1] - crop_size // 2):(center[1] + crop_size // 2)]

        if flatten:
            stim = stim.reshape(stim.shape[0], -1)

        events = np.asarray(events)
        events = events[cell].T * scale

        if log_transform_events:
            events = np.log(1 + events)

        if gaussian_filter > 0:
            events = gaussian_filter1d(events, gaussian_filter)

        index_array = np.arange(events.shape[0])

        tlist = [1, 0] + list(range(2, np.ndim(stim) + 1))
        batches = _make_batches(events.shape[0], batch_size)
        while True:
            if shuffle:
                np.random.shuffle(index_array)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                frame_numbers_b = frame_numbers[batch_ids]
                # One index array per delay, clamped at 0
                batch_ids_stim = [frame_numbers_i[np.maximum(0, batch_ids - d)] for d in delays]
                x_batch = _standardize_input_data(
                    stim[np.asarray(batch_ids_stim), :].transpose(tlist), ['x_batch'])

                y_batch = _standardize_input_data(events[batch_ids, :], ['y_batch'])

                w_batch = weights[batch_ids]

                # Zero out samples too early to have a full delay history
                w_batch[frame_numbers_b < delays[-1]] = 0.
                w_batch = _standardize_sample_weights(w_batch, ['w_batch'])
                yield (x_batch, y_batch, w_batch)
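
Finally, a hedged sketch of how this method would be consumed (the enclosing class, with its vectorize_data and min_max_shift members, is not shown in this example, so exp below stands in for a hypothetical instance of it, and model for a network built to match x_batch[0].shape[1:]):

gen = exp.keras_generator(delays=7, batch_size=400, cell=0, flatten=True)
x_batch, y_batch, w_batch = next(gen)  # each is a list holding one array
model.fit_generator(gen, steps_per_epoch=100, epochs=3)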