Example #1
    def __init__(self,
                 network=None,
                 log=False,
                 dataset="mnist",
                 model="Small",
                 batch_size=128,
                 nb_byz_worker=0):
        self.log = log

        self.network = network
        self.nb_byz_worker = nb_byz_worker
        self.batch_size = batch_size

        dsm = DatasetManager(network, dataset, self.batch_size)
        self.train_data, self.test_data = dsm.data_train, dsm.data_test

        devices = tf.config.list_physical_devices('GPU')
        if len(devices) == 1:
            self.strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
        elif len(devices) > 1:
            self.strategy = tf.distribute.MirroredStrategy()
        else:
            self.strategy = tf.distribute.get_strategy()

        with self.strategy.scope():
            mdm = ModelManager(model=model, info=dsm.ds_info)
            self.model = mdm.model

            self.loss_fn = SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

        ps_hosts = network.get_all_ps()
        worker_hosts = network.get_all_other_worker()
        self.ps_connections = [tools.set_connection(host) for host in ps_hosts]
        self.worker_connections = [
            tools.set_connection(host) for host in worker_hosts
        ]

        self.port = network.get_my_port()
        self.task_id = network.get_task_index()

        self.m = tf.keras.metrics.Accuracy()

        # Define grpc server
        self.service = grpc_message_exchange_servicer.MessageExchangeServicer(
            tools.flatten_weights(self.model.trainable_variables))

        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=30),
                                  options=[('grpc.max_send_message_length',
                                            500 * 1024 * 1024),
                                           ('grpc.max_receive_message_length',
                                            500 * 1024 * 1024)])
        garfield_pb2_grpc.add_MessageExchangeServicer_to_server(
            self.service, self.server)
        self.server.add_insecure_port('[::]:' + str(self.port))

        self.aggregated_weights = None

        self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)
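
A note on the loss built with Reduction.NONE above: it returns one loss value per example, which is the form a tf.distribute strategy expects so that averaging can be done over the global batch rather than per replica. A minimal sketch of that manual reduction (the names below are illustrative, not taken from the class above):

import tensorflow as tf
from tensorflow.keras.losses import SparseCategoricalCrossentropy

loss_fn = SparseCategoricalCrossentropy(
    from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

def compute_loss(labels, logits, global_batch_size):
    per_example_loss = loss_fn(labels, logits)  # shape: [local_batch_size]
    # Average over the global batch so the per-replica losses sum to the true mean.
    return tf.nn.compute_average_loss(per_example_loss,
                                      global_batch_size=global_batch_size)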
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()
        self.convs = []
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = GraphConv(inc,
                              hid,
                              bias=use_bias,
                              activation=activations.get(act))
            self.convs.append(layer)
            inc = hid

        layer = GraphConv(inc, out_channels, bias=use_bias)
        self.convs.append(layer)
        self.dropout = layers.Dropout(dropout)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=True,
                 aggregator='mean',
                 output_normalize=False,
                 n_samples=[15, 5]):

        Agg = _AGG.get(aggregator, None)
        if not Agg:
            raise ValueError(
                f"Invalid value of 'aggregator', allowed values {tuple(_AGG.keys())}, but got '{aggregator}'."
            )

        _intx = intx()
        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        nodes = Input(batch_shape=[None], dtype=_intx, name='nodes')
        neighbors = [
            Input(batch_shape=[None], dtype=_intx, name=f'neighbors_{hop}')
            for hop, n_sample in enumerate(n_samples)
        ]

        aggregators = []
        for hidden, activation in zip(hiddens, activations):
            # you can use `GCNAggregator` instead
            aggregators.append(
                Agg(hidden,
                    concat=True,
                    activation=activation,
                    use_bias=use_bias,
                    kernel_regularizer=regularizers.l2(l2_norm)))

        aggregators.append(Agg(out_channels, use_bias=use_bias))

        h = [tf.nn.embedding_lookup(x, node) for node in [nodes, *neighbors]]
        for agg_i, aggregator in enumerate(aggregators):
            attribute_shape = h[0].shape[-1]
            for hop in range(len(n_samples) - agg_i):
                neighbor_shape = [-1, n_samples[hop], attribute_shape]
                h[hop] = aggregator(
                    [h[hop], tf.reshape(h[hop + 1], neighbor_shape)])
                if hop != len(n_samples) - 1:
                    h[hop] = Dropout(rate=dropout)(h[hop])
            h.pop()

        h = h[0]
        if output_normalize:
            h = tf.nn.l2_normalize(h, axis=1)

        super().__init__(inputs=[x, nodes, *neighbors], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #4
def main():
    dataset = CIFAR10(
        binary=True, validation_split=0.0)  # not using validation for anything
    model = mobilenet_v2_like(dataset.input_shape, dataset.num_classes)

    model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=SGDW(lr=0.01, momentum=0.9, weight_decay=1e-5),
                  metrics=['accuracy'])
    model.summary()

    batch_size = 128

    train_data = dataset.train_dataset() \
        .shuffle(8 * batch_size) \
        .batch(batch_size) \
        .prefetch(tf.data.experimental.AUTOTUNE)
    valid_data = dataset.test_dataset() \
        .batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    def lr_schedule(epoch):
        if 0 <= epoch < 35:
            return 0.01
        if 35 <= epoch < 65:
            return 0.005
        return 0.001

    model.fit(train_data,
              validation_data=valid_data,
              epochs=80,
              callbacks=[LearningRateScheduler(lr_schedule)])
    model.save("cnn-cifar10-binary.h5")
Example #5
def generate_adversarial_perturbation(model, img_base, delta, base_pred,
                                      target_pred, eps, optimizer, steps):

    for step in range(steps):
        with tensorflow.GradientTape() as tape:

            tape.watch(delta)

            adversarial_example = img_base + delta

            prediction = model(adversarial_example, training=False)

            base_pred_labels = fen2labels(base_pred)
            target_pred_labels = fen2labels(target_pred)

            loss = SparseCategoricalCrossentropy(from_logits=True,
                                                 reduction="auto")
            original_loss = -loss(
                tensorflow.convert_to_tensor(base_pred_labels), prediction)
            target_loss = loss(
                tensorflow.convert_to_tensor(target_pred_labels), prediction)

            total_loss = target_loss + original_loss

            if step % 10 == 0:
                print("step: {}, loss: {}...".format(step, total_loss.numpy()))

        gradients = tape.gradient(total_loss, delta)

        optimizer.apply_gradients([(gradients, delta)])
        delta.assign(clip_eps(delta, eps=eps))  # project delta back into the eps-ball

    return delta
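
The function above assumes that delta is a tf.Variable (otherwise tape.watch and apply_gradients cannot update it) and that fen2labels and clip_eps are project-specific helpers. A hypothetical driver, shown only to make the expected types explicit; the image shape and hyperparameters are placeholders:

import tensorflow

# Placeholder input; a real run would load an actual image tensor here.
img_base = tensorflow.zeros([1, 224, 224, 3])
delta = tensorflow.Variable(tensorflow.zeros_like(img_base))  # perturbation must be a Variable
optimizer = tensorflow.keras.optimizers.Adam(learning_rate=1e-2)

# delta = generate_adversarial_perturbation(model, img_base, delta, base_pred,
#                                            target_pred, eps=8 / 255,
#                                            optimizer=optimizer, steps=100)

The call itself is left commented out because model, base_pred and target_pred come from the surrounding project.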
Example #6
    def build(self, hiddens=[16], activations=['relu'], dropout=0.5, l2_norm=5e-4, lr=0.01,
              use_bias=False):

        with tf.device(self.device):

            n_nodes = self.graph.n_nodes
            x = Input(batch_shape=[None, self.graph.n_attrs],
                      dtype=self.floatx, name='attr_matrix')
            wavelet = Input(batch_shape=[n_nodes, n_nodes],
                            dtype=self.floatx, sparse=True, name='wavelet_matrix')
            inverse_wavelet = Input(batch_shape=[n_nodes, n_nodes], dtype=self.floatx, sparse=True,
                                    name='inverse_wavelet_matrix')
            index = Input(batch_shape=[None],
                          dtype=self.intx, name='node_index')

            h = x
            for hidden, activation in zip(hiddens, activations):
                h = WaveletConvolution(hidden, activation=activation, use_bias=use_bias,
                                       kernel_regularizer=regularizers.l2(l2_norm))([h, wavelet, inverse_wavelet])
                h = Dropout(rate=dropout)(h)

            h = WaveletConvolution(self.graph.n_classes, use_bias=use_bias)(
                [h, wavelet, inverse_wavelet])
            h = Gather()([h, index])

            model = Model(
                inputs=[x, wavelet, inverse_wavelet, index], outputs=h)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr), metrics=['accuracy'])

            self.model = model
Example #7
    def process(self,
                train_nodes,
                unlabeled_nodes,
                self_training_labels,
                hids,
                use_relu,
                reset=True):

        self.ll_ratio = None

        with tf.device(self.device):
            self.train_nodes = gf.astensor(train_nodes, dtype=self.intx)
            self.unlabeled_nodes = gf.astensor(unlabeled_nodes, dtype=self.intx)
            self.labels_train = gf.astensor(self.graph.node_label[train_nodes], dtype=self.intx)
            self.self_training_labels = gf.astensor(self_training_labels, dtype=self.intx)
            self.adj_tensor = gf.astensor(self.graph.adj_matrix.A, dtype=self.floatx)
            self.x_tensor = gf.astensor(self.graph.node_attr, dtype=self.floatx)
            self.build(hids=hids)
            self.use_relu = use_relu
            self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)

            self.adj_changes = tf.Variable(tf.zeros_like(self.adj_tensor))
            self.x_changes = tf.Variable(tf.zeros_like(self.x_tensor))

        if reset:
            self.reset()
        return self
Example #8
def get_mlp_model():
    model = Sequential([
        Flatten(input_shape=X_train_grayscale[0].shape, name='flatten'),
        Dense(units=128,
              activation=relu,
              kernel_initializer=he_uniform(),
              bias_initializer=he_uniform(),
              name='dense_1'),
        Dense(units=64,
              activation=relu,
              kernel_initializer=he_uniform(),
              bias_initializer=he_uniform(),
              name='dense_2'),
        Dense(units=32,
              activation=relu,
              kernel_initializer=he_uniform(),
              bias_initializer=he_uniform(),
              name='dense_3'),
        Dense(units=10,
              activation=softmax,
              kernel_initializer=he_uniform(),
              bias_initializer=he_uniform(),
              name='dense_output')
    ])
    model.compile(
        optimizer=Adam(),
        loss=SparseCategoricalCrossentropy(),
        metrics=['accuracy'])
    return model
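
Unlike most examples on this page, this model keeps a softmax activation on its output layer and therefore uses SparseCategoricalCrossentropy() with its default from_logits=False. The two configurations below optimize the same objective; the logits variant is generally the more numerically stable one (purely illustrative sketch):

import tensorflow as tf
from tensorflow.keras import Sequential, layers
from tensorflow.keras.losses import SparseCategoricalCrossentropy

# Option A: probabilities out of the network, default from_logits=False
model_a = Sequential([layers.Dense(10, activation='softmax', input_shape=(784,))])
model_a.compile(optimizer='adam', loss=SparseCategoricalCrossentropy())

# Option B: raw logits out of the network, from_logits=True
model_b = Sequential([layers.Dense(10, input_shape=(784,))])
model_b.compile(optimizer='adam',
                loss=SparseCategoricalCrossentropy(from_logits=True))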
Example #9
    def __init__(self, in_features, out_features,
                 hids=[64], acts=['relu'],
                 dropout=0.5, weight_decay=5e-3,
                 lr=0.01, bias=False, K=10):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid, use_bias=bias, activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)
            h = Dropout(dropout)(h)

        h = Dense(out_features, use_bias=bias, activation=acts[-1],
                  kernel_regularizer=regularizers.l2(weight_decay))(h)
        h = Dropout(dropout)(h)

        h = PropConvolution(K, use_bias=bias, activation='sigmoid',
                            kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #10
def main():
    dataset = SpeechCommands("/datasets/speech_commands_v0.02")
    model = get_model(dataset.input_shape, dataset.num_classes)

    model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=AdamW(lr=0.0005, weight_decay=1e-5),
                  metrics=['accuracy'])
    model.summary()

    batch_size = 100

    train_data = dataset.train_dataset() \
        .shuffle(8 * batch_size) \
        .batch(batch_size) \
        .prefetch(tf.data.experimental.AUTOTUNE)
    valid_data = dataset.test_dataset() \
        .batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    def lr_schedule(epoch):
        if 0 <= epoch < 20:
            return 0.0005
        if 20 <= epoch < 40:
            return 0.0001
        return 0.00002

    model.fit(train_data,
              validation_data=valid_data,
              epochs=50,
              callbacks=[LearningRateScheduler(lr_schedule)])
    model.save("cnn-speech-commands.h5")
Example #11
    def __init__(self, in_features,
                 out_features, hids=[16], num_heads=[8],
                 acts=['elu'], dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01, bias=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, num_head, act in zip(hids, num_heads, acts):
            h = GraphAttention(hid, attn_heads=num_head,
                               reduction='concat',
                               use_bias=bias,
                               activation=act,
                               kernel_regularizer=regularizers.l2(weight_decay),
                               attn_kernel_regularizer=regularizers.l2(
                                   weight_decay),
                               )([h, adj])
            h = Dropout(rate=dropout)(h)

        h = GraphAttention(out_features, use_bias=bias,
                           attn_heads=1, reduction='average')([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #12
    def __init__(self, in_channels, out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, order=2, use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(), name='node_attr')
        adj = [Input(batch_shape=[None, None],
                     dtype=floatx(), sparse=True,
                     name=f'adj_matrix_{i}') for i in range(order + 1)]
        index = Input(batch_shape=[None], dtype=intx(), name='node_index')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = ChebyConvolution(hidden, order=order, use_bias=use_bias,
                                 activation=activation,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])
            h = Dropout(rate=dropout)(h)

        h = ChebyConvolution(out_channels,
                             order=order, use_bias=use_bias)([h, adj])
        h = Gather()([h, index])

        super().__init__(inputs=[x, *adj, index], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #13
def load_training_model(g, vocab_size):
    with select_strategy().scope():
        if g['network-type'] == 'transformer':
            model = transformer(vocab_size, 128, 2048, 0.2, 8, 16,
                                g['sequence-length'], True)
            opt = RMSprop(learning_rate = g['learning-rate'])
        elif g['network-type'] == 'lstm':
            model = lstm_model(vocab_size,
                               g['embedding-size'],
                               g['lstm1-units'],
                               g['lstm2-units'],
                               g['dropout'],
                               g['recurrent-dropout'],
                               False, g['batch-size'])
            opt = RMSprop(learning_rate = g['learning-rate'])
        elif g['network-type'] == 'gpt2':
            set_gpt2_params(g, vocab_size)
            model = gpt2.GPT2()
            opt = Adam(learning_rate = g['learning-rate'], epsilon=1e-08)
        else:
            assert False
        loss_fn = SparseCategoricalCrossentropy(from_logits = True)
        metrics = ['sparse_categorical_accuracy']
        model.compile(
            optimizer = opt,
            loss = loss_fn,
            metrics = metrics)
        model(tf.constant([[0]]))
    return model
Example #14
    def __init__(self,
                 num_layers,
                 d_model,
                 num_heads,
                 dff,
                 input_vocab_size,
                 target_vocab_size,
                 pe_input,
                 pe_target,
                 rate=0.1):
        super(Transformer, self).__init__()

        self.num_layers = num_layers
        self.num_heads = num_heads
        self.d_model = d_model

        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_accuracy = tf.keras.metrics.Mean(name='train_accuracy')

        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               input_vocab_size, pe_input, rate)

        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               target_vocab_size, pe_target)

        self.dense = Dense(target_vocab_size)

        self.loss_object = SparseCategoricalCrossentropy(from_logits=True,
                                                         reduction='none')
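
Because loss_object is created with reduction='none', it returns one loss value per target token; in the usual Transformer training loop this is combined with a padding mask before averaging. A minimal sketch of that pattern, assuming padding tokens have id 0 (the loss_object name matches the snippet, everything else is illustrative):

import tensorflow as tf
from tensorflow.keras.losses import SparseCategoricalCrossentropy

loss_object = SparseCategoricalCrossentropy(from_logits=True, reduction='none')

def loss_function(real, pred):
    # Zero out the loss at padded positions (token id 0 by assumption).
    mask = tf.cast(tf.math.not_equal(real, 0), pred.dtype)
    per_token_loss = loss_object(real, pred) * mask
    return tf.reduce_sum(per_token_loss) / tf.maximum(tf.reduce_sum(mask), 1.0)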
Example #15
def cnn2D(inputSize,
          opt='adam',
          loss=SparseCategoricalCrossentropy(from_logits=True),
          classes=10):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (5, 5),
                      activation='relu',
                      padding="same",
                      input_shape=inputSize + (1, )))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(rate=0.3))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), padding="same", activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(rate=0.3))
    model.add(layers.MaxPooling2D((2, 2)))
    # model.add(layers.Conv2D(64, (5, 5), padding="same", activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(classes))

    model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
    return model
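
A possible way to exercise cnn2D on MNIST (a usage sketch only): note that the function appends the channel dimension itself via inputSize + (1,), and that the missing softmax on the final Dense layer is exactly what from_logits=True in the default loss expects.

import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0  # add the channel axis

model = cnn2D(inputSize=(28, 28), classes=10)
model.fit(x_train, y_train, batch_size=128, epochs=1)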
Example #16
def main():
    dataset = FashionMNIST()
    model = get_model(dataset.input_shape, dataset.num_classes)

    model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=AdamW(lr=0.001, weight_decay=5e-5),
                  metrics=['accuracy'])
    model.summary()

    batch_size = 128

    train_data = dataset.train_dataset() \
        .shuffle(8 * batch_size) \
        .batch(batch_size) \
        .prefetch(tf.data.experimental.AUTOTUNE)
    valid_data = dataset.test_dataset() \
        .batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    model.fit(train_data,
              validation_data=valid_data,
              epochs=40,
              callbacks=[
                  LearningRateScheduler(lambda e: 0.001 if e < 25 else 0.0001)
              ])
    model.save("cnn-fashion-mnist.h5")
Example #17
    def __init__(self, num_classes: int, backbone, num_queries=100, **kwargs):
        super().__init__(**kwargs)

        self.num_classes = num_classes
        self.num_queries = num_queries
        self.hidden_dim = 256

        self.backbone = backbone
        self.input_proj = tf.keras.layers.Conv2D(self.hidden_dim, 1)
        self.pos_embed = PositionEmbeddingSine(output_dim=self.hidden_dim)
        self.transformer_num_layers = 6
        self.transformer = Transformer(num_layers=self.transformer_num_layers,
                                       d_model=self.hidden_dim,
                                       num_heads=8,
                                       dim_feedforward=2048)

        self.bbox_embed = tf.keras.models.Sequential([
            tf.keras.layers.Dense(self.hidden_dim, activation='relu'),
            tf.keras.layers.Dense(self.hidden_dim, activation='relu'),
            tf.keras.layers.Dense(4, activation='sigmoid',
                                  dtype=tf.float32)  # (x1, y1, x2, y2)
        ])
        self.class_embed = tf.keras.layers.Dense(num_classes + 1,
                                                 dtype=tf.float32)

        # Learnable embedding matrix for all the object queries,
        # a matrix of shape [num_queries, self.hidden_dim]
        self.query_embed = tf.keras.layers.Embedding(num_queries,
                                                     self.hidden_dim)
        self.all_the_queries = tf.range(num_queries)

        # Loss computation
        self.weight_class, self.weight_l1, self.weight_giou = 1, 5, 2
        similarity_func = DetrSimilarity(self.weight_class, self.weight_l1,
                                         self.weight_giou)
        self.target_assigner = TargetAssigner(similarity_func,
                                              hungarian_matching,
                                              lambda gt, pred: gt,
                                              negative_class_weight=1.0)

        # Relative classification weight applied to the no-object category.
        # It down-weights the log-probability term of the no-object class
        # by a factor of 10 to account for class imbalance.
        self.non_object_weight = tf.constant(0.1, dtype=self.compute_dtype)

        # Losses
        self.giou = GIoULoss(reduction=tf.keras.losses.Reduction.NONE)
        self.l1 = L1Loss(reduction=tf.keras.losses.Reduction.NONE)
        self.scc = SparseCategoricalCrossentropy(
            reduction=tf.keras.losses.Reduction.NONE, from_logits=True)

        # Metrics
        self.giou_metric = tf.keras.metrics.Mean(name="giou_last_layer")
        self.l1_metric = tf.keras.metrics.Mean(name="l1_last_layer")
        self.scc_metric = tf.keras.metrics.Mean(name="scc_last_layer")
        self.loss_metric = tf.keras.metrics.Mean(name="loss")
        self.precision_metric = tf.keras.metrics.SparseCategoricalAccuracy()
        # Object recall = foreground
        self.recall_metric = tf.keras.metrics.Mean(name="object_recall")
Example #18
    def __init__(self, in_features, out_features,
                 hids=[16], acts=['relu'], dropout=0.5,
                 weight_decay=5e-4, lr=0.01, bias=False):

        _intx = intx()
        _floatx = floatx()
        x = Input(batch_shape=[None, in_features],
                  dtype=_floatx, name='node_attr')
        edge_index = Input(batch_shape=[None, 2], dtype=_intx,
                           name='edge_index')
        edge_weight = Input(batch_shape=[None], dtype=_floatx,
                            name='edge_weight')

        h = x
        for hid, act in zip(hids, acts):
            h = GraphEdgeConvolution(hid, use_bias=bias,
                                     activation=act,
                                     kernel_regularizer=regularizers.l2(weight_decay))([h, edge_index, edge_weight])

            h = Dropout(rate=dropout)(h)

        h = GraphEdgeConvolution(out_features, use_bias=bias)(
            [h, edge_index, edge_weight])

        super().__init__(inputs=[x, edge_index, edge_weight], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
Example #19
    def build(self,
              hiddens=[32],
              activations=['relu'],
              dropout=0.5,
              l2_norm=5e-4,
              lr=0.01,
              use_bias=False):

        with tf.device(self.device):

            x = Input(batch_shape=[None, self.graph.n_attrs],
                      dtype=self.floatx,
                      name='attr_matrix')
            adj = Input(batch_shape=[None, None],
                        dtype=self.floatx,
                        sparse=True,
                        name='adj_matrix')

            h = x
            for hidden, activation in zip(hiddens, activations):
                h = Dense(hidden,
                          use_bias=use_bias,
                          activation=activation,
                          kernel_regularizer=regularizers.l2(l2_norm))(h)
                h = Dropout(rate=dropout)(h)

            h = GraphConvolution(self.graph.n_classes,
                                 use_bias=use_bias)([h, adj])

            model = Model(inputs=[x, adj], outputs=h)
            model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                          optimizer=Adam(lr=lr),
                          metrics=['accuracy'])
            self.model = model
Example #20
    def __init__(self, anchor_ratios=(0.5, 1, 2), **kwargs):
        super().__init__(
            2,
            SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE,
                                          from_logits=True),
            L1Loss(reduction=tf.keras.losses.Reduction.NONE),
            multiples=len(anchor_ratios),
            kernel_initializer_classification_head=initializers.RandomNormal(stddev=0.01),
            kernel_initializer_box_prediction_head=initializers.RandomNormal(stddev=0.01),
            **kwargs)

        # Force each ground truth to match at least one anchor
        matcher = Matcher([0.3, 0.7], [0, -1, 1], allow_low_quality_matches=True)
        self.target_assigner = TargetAssigner(IoUSimilarity(),
                                              matcher,
                                              encode_boxes_faster_rcnn,
                                              dtype=self._compute_dtype)

        anchor_strides = (4, 8, 16, 32, 64)
        anchor_sizes = (32, 64, 128, 256, 512)
        self._anchor_ratios = anchor_ratios

        # Precompute a deterministic grid of anchors for each layer of the pyramid.
        # We will extract a subpart of the anchors according to
        self._anchors = [
            Anchors(stride, size, self._anchor_ratios)
            for stride, size in zip(anchor_strides, anchor_sizes)
        ]
Example #21
def generate_model(output_col, input_columns, model_num, model_label):

    ### Import data
    x = np.loadtxt('./data/' + dataset, delimiter=',', usecols=input_columns)
    y = np.loadtxt('./data/' + dataset, delimiter=',', usecols=(output_col))

    ### Build model
    model = Sequential()
    model.add(Dense(20, input_dim=len(input_columns), activation='relu'))
    model.add(Dense(60, activation='relu'))
    model.add(Dense(60, activation='relu'))
    model.add(Dense(58, activation='softmax'))

    model.compile(
        loss=SparseCategoricalCrossentropy(
            from_logits=False,
            reduction="auto",
            name="sparse_categorical_crossentropy"  #For integer classes
        ),
        optimizer='Adam',
        metrics=['accuracy'])

    ### Train and evaluate
    model.fit(x, y, epochs=10)
    scores = model.evaluate(x, y)
    print(model.metrics_names[1], scores[1] * 100)

    name = 'P' + str(model_num)

    ### Save model
    model.save('./models/' + method + '/' + model_label + '/' + name)
Example #22
    def __init__(self, in_features, out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, bias=False,
                 experimental_run_tf_function=True):

        x = Input(batch_shape=[None, in_features],
                  dtype=floatx(), name='node_attr')
        adj = Input(batch_shape=[None, None], dtype=floatx(),
                    sparse=True, name='adj_matrix')

        h = x
        for hid, act in zip(hids, acts):
            h = GraphConvolution(hid, use_bias=bias,
                                 activation=act,
                                 kernel_regularizer=regularizers.l2(weight_decay))([h, adj])

            h = Dropout(rate=dropout)(h)

        h = GraphConvolution(out_features, use_bias=bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'],
                     experimental_run_tf_function=experimental_run_tf_function)
Example #23
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[32],
                 activations=['relu'],
                 dropout=0.5,
                 l2_norm=5e-4,
                 lr=0.01,
                 use_bias=False):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='attr_matrix')
        adj = Input(batch_shape=[None, None],
                    dtype=floatx(),
                    sparse=True,
                    name='adj_matrix')

        h = x
        for hidden, activation in zip(hiddens, activations):
            h = Dense(hidden,
                      use_bias=use_bias,
                      activation=activation,
                      kernel_regularizer=regularizers.l2(l2_norm))(h)
            h = Dropout(rate=dropout)(h)

        h = GraphConvolution(out_channels, use_bias=use_bias)([h, adj])

        super().__init__(inputs=[x, adj], outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'])
Example #24
def construct_network2():
    model = tf.keras.models.Sequential([
        InputLayer(input_shape=(28, 28, 1)),
        Conv2D(20,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Conv2D(30,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Conv2D(50,
               3,
               kernel_initializer=RandomNormal(0, 0.01),
               padding="same",
               activation="relu"),
        BatchNormalization(),
        MaxPool2D(pool_size=2, strides=2, padding="same"),
        Flatten(),
        Dense(10, kernel_initializer=RandomNormal(0, 0.01)),
    ])
    model.compile(
        optimizer=SGD(learning_rate=0.01, momentum=0.9),
        loss=SparseCategoricalCrossentropy(from_logits=True),
        metrics=["accuracy"],
    )
    model.summary()
    return model
Example #25
def define_learning_model():
    model = Sequential()

    model.add(Rescaling(1. / 255, input_shape=(800, 600, 3)))
    model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(5, activation='softmax'))

    # from_logits=False because the final Dense layer applies a softmax.
    model.compile(loss=SparseCategoricalCrossentropy(from_logits=False),
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])

    model.summary()
    return model
Example #26
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False,
                 experimental_run_tf_function=True):

        x = Input(batch_shape=[None, in_channels],
                  dtype=floatx(),
                  name='node_attr')

        h = x
        for hid, act in zip(hids, acts):
            h = Dense(hid,
                      use_bias=use_bias,
                      activation=act,
                      kernel_regularizer=regularizers.l2(weight_decay))(h)

            h = Dropout(rate=dropout)(h)

        h = Dense(out_channels, use_bias=use_bias)(h)

        super().__init__(inputs=x, outputs=h)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr),
                     metrics=['accuracy'],
                     experimental_run_tf_function=experimental_run_tf_function)
Example #27
def main():
    BATCH_SIZE = 64
    EPOCHS = 3

    optimizer = Adam()
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    loss_fn = SparseCategoricalCrossentropy()

    # Load data
    ds = dataset.load("abhinavtuli/fashion-mnist")

    # transform into Tensorflow dataset
    ds = ds.to_tensorflow()

    # Splitting back into the original train and test sets
    train_dataset = ds.take(60000)
    test_dataset = ds.skip(60000)

    train_dataset = train_dataset.batch(BATCH_SIZE)
    test_dataset = test_dataset.batch(BATCH_SIZE)

    model = create_CNN()
    # model.summary()

    for epoch in range(EPOCHS):
        print("\nStarting Training Epoch {}".format(epoch))
        train(model, train_dataset, optimizer, loss_fn, train_acc_metric)
        print("Training Epoch {} finished\n".format(epoch))
        test(model, test_dataset, test_acc_metric)
Example #28
def create_CNN_model(X_train, y_train, X_test, y_test, X_val, y_val):
    # Reshape data
    X_train_flattened = X_train.reshape(len(X_train), 28 * 28)
    X_test_flattened = X_test.reshape(len(X_test), 28 * 28)
    X_val_flattened = X_val.reshape(len(X_val), 28 * 28)

    cnn = Sequential()
    cnn.add(Dense(units=128, activation='relu'))
    cnn.add(Dense(units=64, activation='relu'))
    cnn.add(Dense(units=10, activation='softmax'))

    callback = EarlyStopping(monitor='val_loss',
                             patience=10,
                             restore_best_weights=True)
    cnn.compile(optimizer=Adam(),
                loss=SparseCategoricalCrossentropy(),
                metrics=['accuracy'])
    history = cnn.fit(x=X_train_flattened,
                      y=y_train,
                      validation_data=(X_val_flattened, y_val),
                      epochs=600,
                      batch_size=32,
                      verbose=False,
                      callbacks=[callback])

    test_loss, test_acc = cnn.evaluate(x=X_test_flattened, y=y_test)

    print(f"Test loss: {test_loss}")
    print(f"Test accuracy: {test_acc}")

    predictions = cnn.predict(X_test_flattened)

    cnn.save('trained_model.h5')

    return predictions
Example #29
    def __init__(self,
                 model,
                 lstm_size=64,
                 lstm_num_layers=1,
                 tanh_constant=1.5,
                 cell_exit_extra_step=False,
                 skip_target=0.4,
                 temperature=None,
                 branch_bias=0.25,
                 entropy_reduction='sum'):
        super().__init__(model)
        self.tanh_constant = tanh_constant
        self.temperature = temperature
        self.cell_exit_extra_step = cell_exit_extra_step

        cells = [
            LSTMCell(units=lstm_size, use_bias=False)
            for _ in range(lstm_num_layers)
        ]
        self.lstm = RNN(cells, stateful=True)
        self.g_emb = tf.random.normal((1, 1, lstm_size)) * 0.1
        self.skip_targets = tf.constant([1.0 - skip_target, skip_target])

        self.max_layer_choice = 0
        self.bias_dict = {}
        for mutable in self.mutables:
            if isinstance(mutable, LayerChoice):
                if self.max_layer_choice == 0:
                    self.max_layer_choice = len(mutable)
                assert self.max_layer_choice == len(mutable), \
                        "ENAS mutator requires all layer choice have the same number of candidates."
                if 'reduce' in mutable.key:
                    bias = []
                    for choice in mutable.choices:
                        if 'conv' in str(type(choice)).lower():
                            bias.append(branch_bias)
                        else:
                            bias.append(-branch_bias)
                    self.bias_dict[mutable.key] = tf.constant(bias)

        # exposed for trainer
        self.sample_log_prob = 0
        self.sample_entropy = 0
        self.sample_skip_penalty = 0

        # internal nn layers
        self.embedding = Embedding(self.max_layer_choice + 1, lstm_size)
        self.soft = Dense(self.max_layer_choice, use_bias=False)
        self.attn_anchor = Dense(lstm_size, use_bias=False)
        self.attn_query = Dense(lstm_size, use_bias=False)
        self.v_attn = Dense(1, use_bias=False)
        assert entropy_reduction in [
            'sum', 'mean'
        ], 'Entropy reduction must be one of sum and mean.'
        self.entropy_reduction = tf.reduce_sum if entropy_reduction == 'sum' else tf.reduce_mean
        self.cross_entropy_loss = SparseCategoricalCrossentropy(
            from_logits=True, reduction=Reduction.NONE)

        self._first_sample = True
Example #30
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train,
                 idx_unlabeled,
                 hidden_layers,
                 use_relu,
                 self_training_labels=None,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 **kwargs):

        super().__init__(adj=adj,
                         x=x,
                         labels=labels,
                         seed=seed,
                         name=name,
                         device=device,
                         **kwargs)
        adj, x, labels = self.adj, self.x, self.labels

        idx_train = asintarr(idx_train)
        idx_unlabeled = asintarr(idx_unlabeled)

        if self_training_labels is None:
            surrogate = DenseGCN(adj,
                                 x,
                                 labels,
                                 device='GPU',
                                 norm_x=None,
                                 seed=None)
            surrogate.build(16, activations='relu' if use_relu else None)
            his = surrogate.train(idx_train,
                                  verbose=0,
                                  epochs=200,
                                  save_best=False)
            self_training_labels = surrogate.predict(idx_unlabeled).argmax(1)

        self.ll_ratio = None
        # metattack can also conduct feature attacks
        self.allow_feature_attack = True

        with tf.device(self.device):
            self.idx_train = astensor(idx_train, dtype=self.intx)
            self.idx_unlabeled = astensor(idx_unlabeled, dtype=self.intx)
            self.labels_train = astensor(self.labels[idx_train],
                                         dtype=self.floatx)
            self.self_training_labels = astensor(self_training_labels,
                                                 dtype=self.floatx)
            self.tf_adj = astensor(adj.A, dtype=self.floatx)
            self.tf_x = astensor(x, dtype=self.floatx)
            self.build(hidden_layers=hidden_layers)
            self.use_relu = use_relu
            self.loss_fn = SparseCategoricalCrossentropy(from_logits=True)

            self.adj_changes = tf.Variable(tf.zeros_like(self.tf_adj))
            self.x_changes = tf.Variable(tf.zeros_like(self.tf_x))