Example #1
import numpy as np
import keras
from keras.datasets import cifar10
from keras.layers import Input
from keras.models import Model

# Repo-local helpers assumed in scope: ImageSampling (custom layer),
# load_dense_matrix, int_to_string.


def uv_test(i, dataset, name, n_v):
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Select the i-th training image and add a batch dimension.
    x_train = x_train[i, :, :, :]
    x_train = np.expand_dims(x_train, axis=0)

    uv_path = dataset + '/uv_coordinates'
    uv = load_dense_matrix(uv_path + '/' + name + '.txt', d_type=np.float32)
    uv = np.reshape(uv, newshape=(n_v, 2))
    inputs = Input(batch_shape=(1, 32, 32, 3))
    x = ImageSampling(nbatch=1, uv=uv)(inputs)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss=keras.losses.mean_absolute_error,
                  optimizer=keras.optimizers.Adam(),
                  metrics=[])
    res = model.predict(x_train, batch_size=1, verbose=0)
    res = res.astype(np.float32)
    save_dir = 'E:/Users/Adrien/Documents/Keras/Gcnn/unit tests'
    # Batch size is 1, so this writes a single sampled-signal file.
    for j in range(1):
        res_ = res[j, :, :].flatten()
        np.savetxt(save_dir + '/cifar10_' + int_to_string(i) + '_' + name +
                   '.txt',
                   res_,
                   fmt='%f')
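
A minimal usage sketch, assuming the folder layout implied above (a dataset directory with a uv_coordinates subfolder holding one n_v x 2 coordinate file per shape). The path, shape name, and vertex count below are illustrative, not taken from the original repo:

# Hypothetical call: sample CIFAR-10 training image 0 at the UV
# coordinates of the (made-up) shape 'shape_000' with 1024 vertices.
uv_test(i=0,
        dataset='path/to/dataset',
        name='shape_000',
        n_v=1024)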
Example #2
    def __init__(self,
                 batch_size,
                 nv,
                 num_classes,
                 shapes,
                 desc_path,
                 labels_path,
                 patch_op_path,
                 radius,
                 nrings,
                 ndirs,
                 ratio,
                 shuffle=True,
                 augment_3d_data=False):
        """Initialization."""
        self.augment_3d_data = augment_3d_data
        self.nv = nv
        if nv is None:
            self.batch_size = 1
        else:
            self.batch_size = batch_size

        # load patch op
        contributors, weights, transport, parents, angular_shifts = load_patch_op(
            shapes_names_txt=shapes,
            shapes_nv=nv,
            radius=radius,
            nrings=nrings,
            ndirs=ndirs,
            ratio=ratio,
            dataset_path=patch_op_path)
        # save shapes names
        self.names = load_names(shapes)

        # load signal
        descriptors = load_descriptors(shapes, desc_path)
        self.nsamples = descriptors.shape[0]
        self.input_dim = descriptors.shape[-1]

        # labels batch (note: this indexing assumes nv is not None)
        labels = np.arange(nv[0], dtype=np.int32)
        labels = np.expand_dims(labels, axis=0)
        labels = np.repeat(labels, repeats=batch_size, axis=0)
        self.labels_batch = keras.utils.to_categorical(labels, nv[0])

        x = [descriptors]
        self.keys = ['input_signal']
        for j in range(len(contributors)):
            self.keys.append('contributors_' + int_to_string(j))
            self.keys.append('weights_' + int_to_string(j))
            self.keys.append('transport_' + int_to_string(j))
        for j in range(len(parents)):
            self.keys.append('parents_' + int_to_string(j))
            self.keys.append('angular_shifts_' + int_to_string(j))

        for j in range(len(contributors)):
            x.append(contributors[j])
            x.append(weights[j])
            x.append(transport[j])

        for j in range(len(parents)):
            x.append(parents[j])
            x.append(angular_shifts[j])

        self.inputs = dict(zip(self.keys, x))
        self.shuffle = shuffle
        self.on_epoch_end()
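
A hedged construction sketch, assuming this __init__ belongs to a keras.utils.Sequence-style generator (the class name ShapeGenerator below is hypothetical) and that the repo helpers load_patch_op, load_names, and load_descriptors are in scope. All paths and hyper-parameters are illustrative:

# Hypothetical instantiation over a two-scale patch-operator hierarchy.
gen = ShapeGenerator(batch_size=2,
                     nv=[1024, 256],
                     num_classes=1024,
                     shapes='splits/train.txt',
                     desc_path='data/descriptors',
                     labels_path='data/labels',
                     patch_op_path='data/patch_ops',
                     radius=[0.05, 0.1],
                     nrings=[2, 2],
                     ndirs=[8, 8],
                     ratio=[1.0, 0.25],
                     shuffle=True)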
Example #3
    def __init__(self,
                 num_classes,
                 desc_paths,
                 labels_paths,
                 patch_op_paths,
                 radius,
                 nrings,
                 ndirs,
                 ratio,
                 shuffle=True,
                 add_noise=False):

        self.add_noise = add_noise
        self.batch_size = 1

        nb_directories = len(labels_paths)
        self.names = []
        self.labels = []
        self.inputs = []
        nb_patch_op = len(radius)
        self.keys = ['input_signal']
        for j in range(nb_patch_op):
            self.keys.append('contributors_' + int_to_string(j))
            self.keys.append('weights_' + int_to_string(j))
            self.keys.append('transport_' + int_to_string(j))
        for j in range(nb_patch_op - 1):
            self.keys.append('parents_' + int_to_string(j))
            self.keys.append('angular_shifts_' + int_to_string(j))

        for i in range(nb_directories):
            # the patch-op directory listing is constant per directory
            patch_op_nv = list_patch_op_dir(patch_op_paths[i])
            for file in os.listdir(labels_paths[i]):
                x = []
                if file.endswith('.txt'):
                    name = file.split('.')[0]
                    print(name)
                    self.names.append(name)
                    labels = load_dense_matrix(os.path.join(
                        labels_paths[i], file),
                                               d_type=np.int32)
                    self.labels.append(np.expand_dims(labels, axis=0))
                    descs = []
                    for j in range(len(desc_paths[i])):
                        desc = load_dense_matrix(os.path.join(
                            desc_paths[i][j], file),
                                                 d_type=np.float32)
                        descs.append(np.expand_dims(desc, axis=0))

                    x.append(np.concatenate(descs, axis=-1))

                    # load patch op
                    nv = []
                    for j in range(len(radius)):
                        nv.append(patch_op_nv[patch_op_key(
                            name, ratio[j], radius[j], nrings[j], ndirs[j])])

                    for j in range(len(radius)):
                        contributors, weights, transport = load_single_patch_op(
                            dataset_path=patch_op_paths[i],
                            name=name,
                            radius=radius[j],
                            nv=nv[j],
                            nrings=nrings[j],
                            ndirs=ndirs[j],
                            ratio=ratio[j])
                        x.append(np.expand_dims(contributors, axis=0))
                        x.append(np.expand_dims(weights, axis=0))
                        x.append(np.expand_dims(transport, axis=0))

                    for j in range(len(radius) - 1):
                        parent_vertices, angular_shifts = load_single_pool_op(
                            dataset_path=patch_op_paths[i],
                            name=name,
                            old_nv=nv[j],
                            new_nv=nv[j + 1],
                            ratio1=ratio[j],
                            ratio2=ratio[j + 1])
                        x.append(np.expand_dims(parent_vertices, axis=0))
                        x.append(np.expand_dims(angular_shifts, axis=0))
                    self.inputs.append(dict(zip(self.keys, x)))

        self.nsamples = len(self.inputs)
        self.input_dim = self.inputs[0]['input_signal'].shape[-1]
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.on_epoch_end()
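
As above, a hedged sketch of building this variable-size variant (the class name VariableShapeGenerator is hypothetical). Note that batch_size is fixed to 1 internally, and desc_paths is a list of descriptor-directory lists, one per labels directory:

# Hypothetical instantiation with two patch-operator scales.
gen = VariableShapeGenerator(num_classes=8,
                             desc_paths=[['data/descriptors']],
                             labels_paths=['data/labels'],
                             patch_op_paths=['data/patch_ops'],
                             radius=[0.05, 0.1],
                             nrings=[2, 2],
                             ndirs=[8, 8],
                             ratio=[1.0, 0.25],
                             shuffle=True)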
Example #4
    def __init__(self,
                 names_list,
                 preds_path,
                 patch_ops_path,
                 descs_paths,
                 radius,
                 nrings,
                 ndirs,
                 ratio,
                 shuffle=True,
                 add_noise=False,
                 num_classes=2):

        self.add_noise = add_noise
        self.batch_size = 1

        if num_classes is None:
            dtype = np.float32
            self.is_classifier = False
        else:
            dtype = np.int32
            self.is_classifier = True

        self.preds = []
        self.inputs = []
        nb_patch_op = len(radius)
        self.keys = ['input_signal']
        for j in range(nb_patch_op):
            self.keys.append('contributors_' + int_to_string(j))
            self.keys.append('weights_' + int_to_string(j))
            self.keys.append('transport_' + int_to_string(j))
        for j in range(nb_patch_op - 1):
            self.keys.append('parents_' + int_to_string(j))
            self.keys.append('angular_shifts_' + int_to_string(j))

        self.names = load_names(names_list)
        self.nsamples = len(self.names)
        ext = os.path.splitext(preds_path)[1]

        if ext == '.txt':
            preds = load_dense_matrix(preds_path, d_type=dtype)
            for i in range(self.nsamples):
                self.preds.append(np.expand_dims(preds[i, :], axis=0))
        else:
            for i in range(self.nsamples):
                labels = load_dense_matrix(os.path.join(
                    preds_path, self.names[i] + '.txt'),
                                           d_type=dtype)
                self.preds.append(np.expand_dims(labels, axis=0))

        # the directory listing is constant across samples; compute it once
        patch_op_nv = list_patch_op_dir(patch_ops_path)
        for i in range(self.nsamples):
            x = []
            descs = []
            for j in range(len(descs_paths)):
                desc = load_dense_matrix(os.path.join(descs_paths[j],
                                                      self.names[i] + '.txt'),
                                         d_type=np.float32)
                descs.append(np.expand_dims(desc, axis=0))
            x.append(np.concatenate(descs, axis=-1))
            # load patch op
            nv = []
            for j in range(len(radius)):
                nv.append(patch_op_nv[patch_op_key(self.names[i], ratio[j],
                                                   radius[j], nrings[j],
                                                   ndirs[j])])

            for j in range(len(radius)):
                contributors, weights, transport = load_single_patch_op(
                    dataset_path=patch_ops_path,
                    name=self.names[i],
                    radius=radius[j],
                    nv=nv[j],
                    nrings=nrings[j],
                    ndirs=ndirs[j],
                    ratio=ratio[j])
                x.append(np.expand_dims(contributors, axis=0))
                x.append(np.expand_dims(weights, axis=0))
                x.append(np.expand_dims(transport, axis=0))

            for j in range(len(radius) - 1):
                parent_vertices, angular_shifts = load_single_pool_op(
                    dataset_path=patch_ops_path,
                    name=self.names[i],
                    old_nv=nv[j],
                    new_nv=nv[j + 1],
                    ratio1=ratio[j],
                    ratio2=ratio[j + 1])
                x.append(np.expand_dims(parent_vertices, axis=0))
                x.append(np.expand_dims(angular_shifts, axis=0))
            self.inputs.append(dict(zip(self.keys, x)))

        self.input_dim = self.inputs[0]['input_signal'].shape[-1]
        if self.is_classifier:
            self.preds_dim = num_classes
        else:
            self.preds_dim = self.preds[0].shape[-1]

        self.num_classes = num_classes
        self.shuffle = shuffle
        self.on_epoch_end()
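
A hedged sketch for this prediction-target variant (the class name PredsShapeGenerator is hypothetical). As handled above, preds_path may be either a single dense .txt matrix or a directory of per-shape .txt files:

# Hypothetical instantiation for per-vertex binary classification.
gen = PredsShapeGenerator(names_list='splits/test.txt',
                          preds_path='data/labels',  # directory of per-shape files
                          patch_ops_path='data/patch_ops',
                          descs_paths=['data/descriptors'],
                          radius=[0.05, 0.1],
                          nrings=[2, 2],
                          ndirs=[8, 8],
                          ratio=[1.0, 0.25],
                          shuffle=False,
                          num_classes=2)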
Example #5
    def __init__(self, n_batch, ratio, n_v, n_rings, n_dirs,
                 inputs,
                 bin_contributors,
                 weights,
                 transport,
                 parents,
                 angular_shifts,
                 batch_norm=False,
                 nstacks=1,
                 nresblocks_per_stack=2,
                 nfilters=16,
                 sync_mode='radial_sync',
                 encode_add_inputs=None,
                 decode_add_inputs=None,
                 name=''):

        if name != '':
            name = name + '_'

        self.encode_add_inputs = encode_add_inputs
        encoder = GcnnResnet(n_batch=n_batch, ratio=ratio, n_v=n_v, n_rings=n_rings, n_dirs=n_dirs,
                             inputs=inputs,
                             bin_contributors=bin_contributors,
                             weights=weights,
                             transport=transport,
                             parents=parents,
                             angular_shifts=angular_shifts,
                             batch_norm=batch_norm,
                             nstacks=nstacks,
                             nresblocks_per_stack=nresblocks_per_stack,
                             nfilters=nfilters,
                             sync_mode=sync_mode,
                             additional_inputs=encode_add_inputs,
                             name=name + 'encoder')

        decode_ratio = []
        decode_nv = []
        decode_nrings = []
        decode_ndirs = []

        # decode patch op
        decode_contributors = []
        decode_weights = []
        decode_transport = []
        decode_parents = []
        decode_angular_shifts = []

        for i in range(nstacks):
            decode_ratio.append(ratio[nstacks - 1 - i])
            if n_v is None:
                decode_nv.append(None)
            else:
                decode_nv.append(n_v[nstacks - 1 - i])
            decode_nrings.append(n_rings[nstacks - 1 - i])
            decode_ndirs.append(n_dirs[nstacks - 1 - i])

            decode_contributors.append(bin_contributors[nstacks-1-i])
            decode_weights.append(weights[nstacks-1-i])
            decode_transport.append(transport[nstacks-1-i])

        for i in range(nstacks-1):
            decode_parents.append(parents[nstacks-2-i])
            decode_angular_shifts.append(angular_shifts[nstacks-2-i])


        # add shortcuts:

        shortcuts_names = []
        shortcuts_list = []

        for i in range(nstacks):
            shortcuts_names.append('stack_' + int_to_string(i))
            shortcuts_list.append(encoder.get_stack(nstacks - 1 - i))
        shortcuts = dict(zip(shortcuts_names, shortcuts_list))
        self.decode_add_inputs = merge_layers_dicts(decode_add_inputs, shortcuts)

        decode_nfilters = encoder.get_output_dim()

        decoder = GcnnResnet(n_batch=n_batch, ratio=decode_ratio, n_v=decode_nv, n_rings=decode_nrings,
                             n_dirs=decode_ndirs,
                             inputs=encoder.get_output(),
                             bin_contributors=decode_contributors,
                             weights=decode_weights,
                             transport=decode_transport,
                             parents=decode_parents,
                             angular_shifts=decode_angular_shifts,
                             nstacks=nstacks,
                             nresblocks_per_stack=nresblocks_per_stack,
                             nfilters=decode_nfilters,
                             sync_mode=sync_mode,
                             batch_norm=batch_norm,
                             additional_inputs=self.decode_add_inputs,
                             name=name + 'decoder')

        self.encoder = encoder
        self.decoder = decoder
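
This wrapper chains two GcnnResnet instances into an encoder/decoder pair, reversing the ratio/n_v/n_rings/n_dirs lists and feeding each encoder stack to the decoder as a shortcut. A hedged wiring sketch, assuming the enclosing class is named GcnnUNet (hypothetical) and that ops is a two-scale symbolic-input builder like the one in Example #6:

# Hypothetical: two-scale encoder/decoder over symbolic patch-operator inputs.
sig = Input(batch_shape=(1, None, 16), name='input_signal')
unet = GcnnUNet(n_batch=1, ratio=[1.0, 0.25], n_v=None,
                n_rings=[2, 2], n_dirs=[8, 8],
                inputs=sig,
                bin_contributors=ops.contributors,
                weights=ops.weights,
                transport=ops.transport,
                parents=ops.parents,
                angular_shifts=ops.angular_shifts,
                nstacks=2)
features = unet.decoder.get_output()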
Example #6
    def __init__(self, n_batch, n_v, n_rings, n_dirs, ratios, name=''):

        self.contributors = []
        self.weights = []
        self.transport = []

        self.parents = []
        self.angular_shifts = []

        self.inputs_names = []
        self.inputs = []

        self.nbatch = n_batch
        self.nv = n_v
        self.ndirs = n_dirs
        self.ratios = ratios
        self.nrings = n_rings
        self.npools = len(ratios)-1

        for i in range(self.npools):
            if n_v is None:
                new_nv = None
            else:
                new_nv = min(n_v[i], n_v[i + 1])
            self.inputs_names.append(name + 'parents_' + int_to_string(i))
            x = Input(batch_shape=(n_batch, new_nv),
                      dtype='int32',
                      name=name + 'parents_' + int_to_string(i))
            self.inputs.append(x)
            self.parents.append(x)
            self.inputs_names.append(name + 'angular_shifts_' + int_to_string(i))
            x = Input(batch_shape=(n_batch, new_nv),
                      dtype='float32',
                      name=name + 'angular_shifts_' + int_to_string(i))
            self.angular_shifts.append(x)
            self.inputs.append(x)

        for stack in range(self.npools + 1):
            if n_v is None:
                patch_op_shape = (None, n_rings[stack], n_dirs[stack], 3)
            else:
                patch_op_shape = (n_v[stack], n_rings[stack], n_dirs[stack], 3)
            self.inputs_names.append(name + 'contributors_' + int_to_string(stack))
            x = Input(batch_shape=(n_batch,) + patch_op_shape, dtype='int32',
                      name=name + 'contributors_' + int_to_string(stack))
            self.contributors.append(x)
            self.inputs.append(x)
            self.inputs_names.append(name + 'weights_' + int_to_string(stack))
            x = Input(batch_shape=(n_batch,) + patch_op_shape, dtype='float32',
                      name=name + 'weights_' + int_to_string(stack))
            self.weights.append(x)
            self.inputs.append(x)

            x = Input(batch_shape=(n_batch,) + patch_op_shape, dtype='float32',
                      name=name + 'transport_' + int_to_string(stack))
            self.inputs_names.append(name + 'transport_' + int_to_string(stack))
            self.transport.append(x)
            self.inputs.append(x)

        self.inputs_dict = dict(zip(self.inputs_names, self.inputs))
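
This builder only declares symbolic Keras Input tensors for a patch-operator hierarchy. A hedged sketch, assuming the class is named PatchOperatorInputs (hypothetical):

# Hypothetical: declare inputs for two scales (one pooling step).
ops = PatchOperatorInputs(n_batch=1, n_v=None,
                          n_rings=[2, 2], n_dirs=[8, 8],
                          ratios=[1.0, 0.25])
# ops.inputs is the flat list of Input tensors and
# ops.inputs_dict maps names to them, e.g. ops.inputs_dict['contributors_0'].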
Example #7
    def __init__(self, n_batch, ratio, n_v, n_rings, n_dirs,
                 inputs,
                 bin_contributors,
                 weights,
                 transport,
                 parents,
                 angular_shifts,
                 batch_norm=False,
                 nstacks=1,
                 nresblocks_per_stack=2,
                 nfilters=16,
                 sync_mode='radial_sync',
                 additional_inputs=None,
                 name=''):

        """ResNet Version 1 Model builder [a]
        Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
        Last ReLU is after the shortcut connection.
        At the beginning of each stage, the feature map size is halved (downsampled)
        by a convolutional layer with strides=2, while the number of filters is
        doubled. Within each stage, the layers have the same number filters and the
        same number of filters.
        Features maps sizes:
        stage 0: 32x32, 16
        stage 1: 16x16, 32
        stage 2:  8x8,  64
        The Number of parameters is approx the same as Table 6 of [a]:
        ResNet20 0.27M
        ResNet32 0.46M
        ResNet44 0.66M
        ResNet56 0.85M
        ResNet110 1.7M
        # Arguments
            input_shape (tensor): shape of input image tensor
            depth (int): number of core convolutional layers
            num_classes (int): number of classes (CIFAR10 has 10)
        # Returns
            model (Model): Keras model instance
        """
        if name != '':
            name = name + '_'

        bn = batch_norm
        pool_ = True

        # in 'async' mode, take the max over directions after each layer
        take_max = (sync_mode == 'async')

        if n_v is None:
            n_v = [None for _ in range(len(n_dirs))]

        # Start model definition.
        num_filters = nfilters
        self.nfilters = []
        self.num_res_blocks = nresblocks_per_stack
        self.additional_inputs = additional_inputs
        self.nstacks = nstacks
        self.inputs_names = [name + 'input_signal']

        x = inputs

        C = bin_contributors
        W = weights
        TA = transport
        P = parents
        AS = angular_shifts
        NV = []
        for i in range(nstacks-1):
            self.inputs_names.append(name + 'parents_' + int_to_string(i))
            self.inputs_names.append(name + 'angular_shifts_' + int_to_string(i))
        for i in range(nstacks):
            self.inputs_names.append(name + 'contributors_' + int_to_string(i))
            self.inputs_names.append(name + 'weights_' + int_to_string(i))
            self.inputs_names.append(name + 'transport_' + int_to_string(i))
            NV.append(Shape(axis=1)(C[i]))

        stack_ = 0

        if num_filters is None:
            num_filters = K.int_shape(x)[-1]

        if K.int_shape(x)[-1] != num_filters:
            x = gcnn_resnet_layer(inputs=x,
                                  contributors=C[stack_],
                                  weights=W[stack_],
                                  angles=TA[stack_],
                                  n_v=n_v[0],
                                  n_rings=n_rings[0],
                                  n_dirs=n_dirs[0],
                                  num_filters=num_filters,
                                  sync_mode=sync_mode,
                                  batch_normalization=bn,
                                  take_max=take_max)

        self.stacks = []

        # Instantiate the stack of residual units
        for stack in range(nstacks):
            for res_block in range(self.num_res_blocks):
                if stack > 0 and res_block == 0:  # first layer but not first stack
                    if pool_:
                        stack_ = stack
                        if ratio[stack - 1] > ratio[stack]:
                            # coarsening: pool down to the next vertex set
                            x = Pooling()([x, P[stack - 1], AS[stack - 1]])
                        else:
                            # refinement: transposed pooling back up
                            x = TransposedPooling(new_nv=n_v[stack],
                                                  new_ndirs=n_dirs[stack])([x, P[stack - 1],
                                                                            AS[stack - 1],
                                                                            NV[stack]])
                    else:
                        num_filters *= 2
                        stack_ = 0

                    if self.additional_inputs is not None:
                        key = 'stack_' + int_to_string(stack)
                        if key in self.additional_inputs:
                            x = Concatenate(axis=-1)([x, self.additional_inputs[key]])

                    num_filters = int(np.sqrt(ratio[stack - 1] / ratio[stack] + 0.0001) * K.int_shape(x)[-1])

                y = gcnn_resnet_layer(inputs=x,
                                      contributors=C[stack_],
                                      weights=W[stack_],
                                      angles=TA[stack_],
                                      n_v=n_v[stack_],
                                      n_rings=n_rings[stack_],
                                      n_dirs=n_dirs[stack_],
                                      num_filters=num_filters,
                                      sync_mode=sync_mode,
                                      batch_normalization=bn,
                                      take_max=take_max)
                y = gcnn_resnet_layer(inputs=y,
                                      contributors=C[stack_],
                                      weights=W[stack_],
                                      angles=TA[stack_],
                                      n_v=n_v[stack_],
                                      n_rings=n_rings[stack_],
                                      n_dirs=n_dirs[stack_],
                                      num_filters=num_filters,
                                      sync_mode=sync_mode,
                                      batch_normalization=bn,
                                      take_max=take_max,
                                      activation=None)

                if stack > 0 and res_block == 0:
                    # linear projection residual shortcut connection to match
                    # changed dims
                    x = Dense(units=num_filters, use_bias=False, activation=None)(x)

                x = keras.layers.add([x, y])

                if res_block == self.num_res_blocks-1:
                    # save stack
                    self.stacks.append(x)

                x = Activation('relu')(x)

        # Instantiate model.
        self.output_dim = num_filters
        self.output = x
        self.input = inputs
        self.inputs_list = [self.input]

        for i in range(len(C)):
            self.inputs_list.append(C[i])
            self.inputs_list.append(W[i])
            self.inputs_list.append(TA[i])
        for i in range(len(P)):
            self.inputs_list.append(P[i])
            self.inputs_list.append(AS[i])
        self.inputs_dict = dict(zip(self.inputs_names, self.inputs_list))
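
A hedged end-to-end sketch, assuming the class is GcnnResnet (as instantiated in Example #5) and that its patch-operator tensors come from a single-scale builder like Example #6 (PatchOperatorInputs is a hypothetical name):

# Hypothetical: single-stack residual GCNN over symbolic patch operators.
ops = PatchOperatorInputs(n_batch=1, n_v=None,
                          n_rings=[2], n_dirs=[8], ratios=[1.0])
sig = Input(batch_shape=(1, None, 16), name='input_signal')
resnet = GcnnResnet(n_batch=1, ratio=[1.0], n_v=None, n_rings=[2], n_dirs=[8],
                    inputs=sig,
                    bin_contributors=ops.contributors,
                    weights=ops.weights,
                    transport=ops.transport,
                    parents=ops.parents,
                    angular_shifts=ops.angular_shifts,
                    nstacks=1, nresblocks_per_stack=2, nfilters=16)
model = Model(inputs=resnet.inputs_list, outputs=resnet.output)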
Example #8
# Assumes numpy as np, os, and the repo helpers gcnn_resnet_v1, load_patch_op,
# load_descriptors, load_labels, int_to_string, plt_history are in scope.
def shape_dataset_segmentation(train_txt,
                               test_txt,
                               patch_op_path,
                               desc_path,
                               input_dim,
                               nclasses,
                               labels_path,
                               radius,
                               nbatch,
                               nv,
                               nrings,
                               ndirs,
                               ratio,
                               nepochs,
                               generator=None,
                               classes=None,
                               save_dir=None,
                               model_name='model'):
    # the model name doubles as the sync-mode switch
    if model_name == 'async':
        sync_mode = 'async'
    else:
        sync_mode = 'radial_sync'

    # create model

    model = gcnn_resnet_v1(n_batch=nbatch,
                           ratio=ratio,
                           n_v=nv,
                           n_rings=nrings,
                           n_dirs=ndirs,
                           fixed_patch_op=False,
                           contributors=None,
                           weights=None,
                           angles=None,
                           parents=None,
                           angular_shifts=None,
                           batch_norm=False,
                           uv=None,
                           input_dim=input_dim,
                           nstacks=1,
                           nresblocks_per_stack=2,
                           nfilters=16,
                           sync_mode=sync_mode,
                           num_classes=nclasses)

    # load patch op
    train_c, train_w, train_t_a, train_p, train_a_s = load_patch_op(
        shapes_names_txt=train_txt,
        shapes_nv=nv,
        radius=radius,
        nrings=nrings,
        ndirs=ndirs,
        ratio=ratio,
        dataset_path=patch_op_path)

    test_c, test_w, test_t_a, test_p, test_a_s = load_patch_op(
        shapes_names_txt=test_txt,
        shapes_nv=nv,
        radius=radius,
        nrings=nrings,
        ndirs=ndirs,
        ratio=ratio,
        dataset_path=patch_op_path)

    # load signal

    train_desc = load_descriptors(train_txt, desc_path)
    n_train_samples = train_desc.shape[0]

    test_desc = load_descriptors(test_txt, desc_path)
    n_test_samples = test_desc.shape[0]

    # load labels

    y_train = load_labels(train_txt, labels_path, nclasses)
    y_test = load_labels(test_txt, labels_path, nclasses)

    x_train = [train_desc]
    x_test = [test_desc]

    input_names = ['input_signal']
    for j in range(len(train_c)):
        input_names.append('contributors_' + int_to_string(j))
        input_names.append('weights_' + int_to_string(j))
        input_names.append('transport_' + int_to_string(j))
    for j in range(len(train_p)):
        input_names.append('parents_' + int_to_string(j))
        input_names.append('angular_shifts_' + int_to_string(j))

    for j in range(len(train_c)):
        x_train.append(train_c[j])
        x_train.append(train_w[j])
        x_train.append(train_t_a[j])

        x_test.append(test_c[j])
        x_test.append(test_w[j])
        x_test.append(test_t_a[j])

    for j in range(len(train_p)):
        x_train.append(train_p[j])
        x_train.append(train_a_s[j])

        x_test.append(test_p[j])
        x_test.append(test_a_s[j])

    # sanity check: print the shape of every test input tensor
    for x_ in x_test:
        print(np.shape(x_))

    x_train = dict(zip(input_names, x_train))
    x_test = dict(zip(input_names, x_test))

    # train model

    opt = 'adam'

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    model.summary()

    if generator is None:
        history = model.fit(x_train,
                            y_train,
                            batch_size=nbatch,
                            epochs=nepochs,
                            validation_data=(x_test, y_test),
                            shuffle=True)
    else:
        training_generator = generator(x_train,
                                       y_train,
                                       nbatch,
                                       nv,
                                       n_classes=nclasses,
                                       shuffle=True)
        test_generator = generator(x_test,
                                   y_test,
                                   nbatch,
                                   nv,
                                   n_classes=nclasses,
                                   shuffle=True)

        history = model.fit_generator(generator=training_generator,
                                      steps_per_epoch=n_train_samples // nbatch,
                                      epochs=nepochs,
                                      validation_data=test_generator,
                                      validation_steps=1,
                                      use_multiprocessing=False,
                                      workers=1)

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1, batch_size=nbatch)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    if save_dir is not None:
        # Save model and weights
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        model_path = os.path.join(save_dir, model_name)
        weights_path = os.path.join(save_dir, model_name + '_weights.h5')

        # Option 1: Save Weights + Architecture
        model.save_weights(weights_path)
        with open(model_path + '.json', 'w') as f:
            f.write(model.to_json())
        model.save(model_path + '.h5')

        print('Saved trained model at %s ' % model_path)

        # confusion matrix (plot_confusion_mat_ call currently disabled)
        y_pred = model.predict(x_test, batch_size=nbatch, verbose=0)

        plt_history(history=history,
                    save_path=os.path.join(save_dir, model_name + '_history'))
    else:
        plt_history(history=history, save_path=None)
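
A hedged end-to-end call of this training routine; every path and hyper-parameter below is illustrative:

# Hypothetical run: single-stack model, batches of 2 shapes, 1024 vertices each.
shape_dataset_segmentation(train_txt='splits/train.txt',
                           test_txt='splits/test.txt',
                           patch_op_path='data/patch_ops',
                           desc_path='data/descriptors',
                           input_dim=16,
                           nclasses=8,
                           labels_path='data/labels',
                           radius=[0.1],
                           nbatch=2,
                           nv=[1024],
                           nrings=[2],
                           ndirs=[8],
                           ratio=[1.0],
                           nepochs=50,
                           save_dir='results',
                           model_name='radial_sync_model')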