Example #1
def get_all_linear_layers(model):
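    """Return every layer that owns a kernel (conv or dense) and either has a
    fused non-linear activation or is followed, modulo batch-norm, by an
    Activation/ActivationQ layer."""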
    layers = []
    all_layers = get_all_layers(model)
    for idx, layer in enumerate(all_layers):
        # linear layers (conv/dense) expose a `kernel` or `depthwise_kernel`
        if hasattr(layer, 'kernel') or hasattr(layer, 'depthwise_kernel'):
            if layer.activation.__name__ != "linear":
                # non-linear activation is fused into the layer itself
                layers.append(layer)
            else:
                # peek at the following layer, skipping over batch-norm
                next_layer = all_layers[idx + 1] if idx + 1 < len(all_layers) else None

                if isinstance(next_layer, BatchNormalization):
                    next_layer = all_layers[idx + 2] if idx + 2 < len(all_layers) else None

                if isinstance(next_layer, (Activation, ActivationQ)):
                    layers.append(layer)

    return layers
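
A minimal usage sketch (the model below is hypothetical; `get_all_layers`, `ActivationQ`, and the Keras layer classes are assumed to be in scope from the surrounding codebase):

from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization, Activation, Dense, Flatten

model = Sequential([
    Conv2D(16, 3, input_shape=(32, 32, 3)),  # linear conv; the BN below is
    BatchNormalization(),                     # skipped when peeking ahead, so
    Activation("relu"),                       # this conv is collected
    Flatten(),                                # no kernel: ignored
    Dense(10, activation="softmax"),          # fused non-linear act: collected
])

linear_layers = get_all_linear_layers(model)  # -> [the Conv2D, the Dense]
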
Example #2
def transform(model, bits_w, bits_x, log=False, quantize=True, verif_preproc=False,
              slalom=False, slalom_integrity=False, slalom_privacy=False, sgxutils=None, queues=None):
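    """Rebuild `model` as a Sequential model with quantized layers.

    Batch-norm layers are first fused into the preceding convolutions
    (fuse_bn); linear layers are then replaced by their quantized
    counterparts (Conv2DQ, DepthwiseConv2DQ, DenseQ) and activations by
    ActivationQ. Returns the new model plus flattened TF tensors for the
    inputs and outputs of the linear operators."""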

    if slalom:
        assert quantize

    old_ops = K.get_session().graph.get_operations()

    all_layers = get_all_layers(model)
    fuse_bn(all_layers)

    queue_ctr = 0
    layers = model.layers
    layer_map = {}
    flattened = False

    def transform_layer(layer, next_layer, queue_ctr, flattened):
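        # Returns (new_layers, queue_ctr, flattened, skip_next); skip_next tells the
        # caller that next_layer was fused into this layer and should be dropped.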
        print("transform {} (next = {})".format(layer, next_layer))
        new_layers = []
        skip_next = False

        if isinstance(layer, InputLayer):
            new_layers.append(InputLayer.from_config(layer.get_config()))

        elif isinstance(layer, Conv2D) and not isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            act = conf['activation']

            # if the next layer is a pooling layer, create a fused activation
            maxpool_params = None
            if slalom and isinstance(next_layer, MaxPooling2D):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"

                if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                    assert layer.activation in [relu, relu6]
                    act = "avgpool" + act
                    skip_next = True

                act_layer = ActivationQ(act, bits_w, bits_x, maxpool_params=maxpool_params, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = Conv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, DepthwiseConv2D):
            conf = layer.get_config()

            assert conf['activation'] == "linear"

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            new_layer = DepthwiseConv2DQ.from_config(conf)
            new_layers.append(new_layer)
            layer_map[new_layer] = layer

        elif isinstance(layer, Dense):
            conf = layer.get_config()

            act = conf['activation']

            act_layer = None
            if act != "linear":
                conf['activation'] = "linear"
                act_layer = ActivationQ(act, bits_w, bits_x, log=log,
                                        quantize=quantize, slalom=slalom, slalom_integrity=slalom_integrity,
                                        slalom_privacy=slalom_privacy, sgxutils=sgxutils,
                                        queue=None if queues is None else queues[queue_ctr])
                queue_ctr += 1

            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            conf['slalom'] = slalom
            conf['slalom_integrity'] = slalom_integrity
            conf['slalom_privacy'] = slalom_privacy
            conf['sgxutils'] = sgxutils

            # replace the dense layer by a pointwise convolution
            if verif_preproc:
                del conf['units']
                conf['filters'] = layer.units
                conf['kernel_size'] = 1
                if not flattened:
                    h_in = int(layer.input_spec.axes[-1])
                    new_layers.append(Reshape((1, 1, h_in)))
                    flattened = True
                new_layer = Conv2DQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            else:
                new_layer = DenseQ.from_config(conf)
                new_layers.append(new_layer)
                layer_map[new_layer] = layer

            if act_layer is not None:
                new_layers.append(act_layer)

        elif isinstance(layer, BatchNormalization):
            pass

        elif isinstance(layer, MaxPooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(MaxPooling2D.from_config(layer.get_config()))

        elif isinstance(layer, AveragePooling2D):
            assert (not slalom or not slalom_privacy)
            new_layers.append(AveragePooling2D.from_config(layer.get_config()))
            new_layers.append(Lambda(lambda x: K.round(x)))

        elif isinstance(layer, Activation):
            assert layer.activation in [relu6, relu, softmax]

            queue = None if queues is None else queues[queue_ctr]
            queue_ctr += 1

            act_func = "relu6" if layer.activation == relu6 else "relu" if layer.activation == relu else "softmax"
            if slalom and isinstance(next_layer, GlobalAveragePooling2D):
                #assert layer.activation == relu6
                act_func = "avgpoolrelu6"
                skip_next = True

            maxpool_params = None
            if slalom and (isinstance(next_layer, MaxPooling2D) or isinstance(next_layer, AveragePooling2D)):
                mp = next_layer
                assert (layer.activation == relu)
                maxpool_params = mp.get_config()
                skip_next = True

            new_layers.append(ActivationQ(act_func, bits_w, bits_x, log=log,
                                      maxpool_params=maxpool_params,
                                      quantize=quantize, slalom=slalom,
                                      slalom_integrity=slalom_integrity,
                                      slalom_privacy=slalom_privacy,
                                      sgxutils=sgxutils, queue=queue))

        elif isinstance(layer, ZeroPadding2D):
            if quantize:
                # merge with next layer
                conv = next_layer
                assert isinstance(conv, (Conv2D, DepthwiseConv2D))
                assert conv.padding == 'valid'
                conv.padding = 'same'
            else:
                new_layers.append(ZeroPadding2D.from_config(layer.get_config()))

        elif isinstance(layer, Flatten):
            if not verif_preproc:
                new_layers.append(Flatten.from_config(layer.get_config()))

        elif isinstance(layer, GlobalAveragePooling2D):
            assert not slalom
            conf = layer.get_config()
            conf['bits_w'] = bits_w
            conf['bits_x'] = bits_x
            conf['log'] = log
            conf['quantize'] = quantize
            new_layers.append(GlobalAveragePooling2DQ.from_config(conf))

        elif isinstance(layer, Reshape):
            new_layers.append(Reshape.from_config(layer.get_config()))

        elif isinstance(layer, Dropout):
            pass

        elif isinstance(layer, ResNetBlock):
            #assert not slalom

            path1 = []
            path2 = []
            for l in layer.path1:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path1.extend(lq)

            for l in layer.path2:
                lq, queue_ctr, _, _ = transform_layer(l, None, queue_ctr, flattened)
                path2.extend(lq)

            [actq], queue_ctr, flattened, skip_next = transform_layer(layer.merge_act, next_layer, queue_ctr, flattened)
            new_layer = ResNetBlock(layer.kernel_size, layer.filters, layer.stage, layer.block, layer.identity,
                                    layer.strides, path1=path1, path2=path2, merge_act=actq, 
                                    quantize=quantize, bits_w=bits_w, bits_x=bits_x,
                                    slalom=slalom, slalom_integrity=slalom_integrity, slalom_privacy=slalom_privacy)

            new_layers.append(new_layer)
        else:
            raise AttributeError("Don't know how to handle layer {}".format(layer))

        return new_layers, queue_ctr, flattened, skip_next

    new_model = Sequential()
    skip_next = False
    l_count = 0
    # walk the layers in order; skip_next drops a layer that was fused into
    # the previous layer's activation
    while layers:
        layer = layers.pop(0)
        next_layer = layers[0] if len(layers) else None
        # only the first 7 layers are transformed/quantized; later ones are copied over as-is
        if l_count <= 6:
            if not skip_next:
                new_layers, queue_ctr, flattened, skip_next = transform_layer(layer, next_layer, queue_ctr, flattened)
                for new_layer in new_layers:
                    new_model.add(new_layer)
            else:
                skip_next = False
        else:
            skip_next = False
            if isinstance(layer, MaxPooling2D):
                new_layer = MaxPooling2D.from_config(layer.get_config())
            elif isinstance(layer, Flatten):
                new_layer = Flatten.from_config(layer.get_config())
            elif isinstance(layer, (Conv2D, Dense)):
                # keep the original (unquantized) layer, weights included
                new_layer = layer
            else:
                raise AttributeError("Don't know how to handle layer {}".format(layer))
            new_model.add(new_layer)
        l_count += 1
    print("transformed summery")

    print(new_model.summary())

    # copy over (and potentially quantize) all the weights
    new_layers = get_all_layers(new_model)

    for layer in new_layers:
        if layer in layer_map:
            src_layer = layer_map[layer]

            weights = src_layer.get_weights()
            kernel = weights[0]
            bias = weights[1]
            print(layer)
            if quantize and isinstance(layer, Conv2DQ):
                # scale to fixed point: weights by 2^bits_w, biases by 2^(bits_w + bits_x)
                range_w = 2**bits_w
                range_x = 2**bits_x
                kernel_q = np.round(range_w * kernel)
                bias_q = np.round(range_w * range_x * bias)

                if slalom_privacy and isinstance(layer, DepthwiseConv2DQ):
                    # float64 keeps the larger quantized values exact
                    bias_q = bias_q.astype(np.float64)
                    kernel_q = kernel_q.astype(np.float64)

                layer._trainable_weights = layer._trainable_weights[2:]

                if isinstance(src_layer, Dense) and verif_preproc:
                    kernel_q = np.reshape(kernel_q, (1, 1, kernel_q.shape[0], kernel_q.shape[1]))

                layer.set_weights((kernel_q, bias_q))
            else:
                layer._trainable_weights = layer._trainable_weights[2:]
                layer.set_weights((kernel, bias))

    # find all the TensorFlow ops that correspond to inputs/outputs of linear operators
    new_ops = [op for op in K.get_session().graph.get_operations() if op not in old_ops]
    linear_ops_in = [tf.reshape(op.inputs[0], [-1]) for op in new_ops if op.type in ['Conv2D', 'MatMul', 'DepthwiseConv2dNative']]
    linear_ops_out = [tf.reshape(op.outputs[0], [-1]) for op in new_ops if op.type in ['BiasAdd']]

    return new_model, linear_ops_in, linear_ops_out
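
For reference, a hypothetical call site (the bit-widths are illustrative and `model` is any supported Keras model; the quantized layer classes come from the surrounding codebase):

new_model, linear_ops_in, linear_ops_out = transform(
    model, bits_w=8, bits_x=8, quantize=True,
    slalom=False, slalom_integrity=False, slalom_privacy=False)

new_model.summary()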