Example #1
    def __init__(
            self,
            vocabulary_size,
            embedding_size,
            num_sampled=64,
            activate_nce_loss=True,
            nce_loss_args=None,
            E_init=tl.initializers.random_uniform(minval=-1.0, maxval=1.0),
            nce_W_init=tl.initializers.truncated_normal(stddev=0.03),
            nce_b_init=tl.initializers.constant(value=0.0),
            name=None,  #'word2vec',
    ):

        super(Word2vecEmbedding, self).__init__(name)
        self.vocabulary_size = vocabulary_size
        self.embedding_size = embedding_size
        self.num_sampled = num_sampled
        self.E_init = E_init
        self.activate_nce_loss = activate_nce_loss

        if self.activate_nce_loss:
            self.nce_loss_args = nce_loss_args
            self.nce_W_init = nce_W_init
            self.nce_b_init = nce_b_init

        if not self._built:
            self.build(tuple())
            self._built = True

        logging.info("Word2vecEmbedding %s: (%d, %d)" % (self.name, self.vocabulary_size, self.embedding_size))
Example #2
    def __init__(
            self,
            keep=0.5,
            n_units=100,
            act=None,
            W_init=tl.initializers.truncated_normal(stddev=0.1),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None,  # 'dropconnect',
    ):
        super().__init__(name)

        if isinstance(keep, numbers.Real) and not (keep > 0 and keep <= 1):
            raise ValueError("keep must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep)

        self.keep = keep
        self.n_units = n_units
        self.act = act
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        if self.in_channels is not None:
            self.build((None, self.in_channels))
            self._built = True

        logging.info(
            "DropconnectDense %s: %d %s" %
            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
        )
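
DropConnect differs from ordinary dropout in that the random keep mask is applied to the weight matrix rather than to the activations. A minimal NumPy sketch of that idea (not TensorLayer's implementation):

import numpy as np

rng = np.random.default_rng(0)
keep = 0.5
W = rng.normal(scale=0.1, size=(4, 3))   # weights of a small dense layer
x = rng.normal(size=(2, 4))              # a batch of two inputs

mask = rng.random(W.shape) < keep        # Bernoulli(keep) mask over the weights
W_drop = (W * mask) / keep               # inverted scaling keeps E[W_drop] = W
y = x @ W_drop
print(y.shape)                           # (2, 3)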
Example #3
    def __init__(
            self,
            act=None,
            shape=(5, 1, 5),
            stride=1,
            padding='SAME',
            data_format='NWC',
            dilation_rate=1,
            W_init=tl.initializers.truncated_normal(stddev=0.02),
            b_init=tl.initializers.constant(value=0.0),
            name=None  # 'cnn1d_layer',
    ):
        super().__init__(name)
        self.act = act
        self.n_filter = shape[-1]
        self.filter_size = shape[0]
        self.shape = shape
        self.stride = stride
        self.dilation_rate = dilation_rate
        self.padding = padding
        self.data_format = data_format
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = shape[-2]

        self.build(None)
        self._built = True

        logging.info(
            "Conv1dLayer %s: shape: %s stride: %s pad: %s act: %s" % (
                self.name, str(shape), str(stride), padding,
                self.act.__name__ if self.act is not None else 'No Activation'
            )
        )
Example #4
    def __init__(
            self,
            n_units,
            act=None,
            W_init=tl.initializers.truncated_normal(stddev=0.1),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None,  # 'dense',
    ):

        super(Dense, self).__init__(name)

        self.n_units = n_units
        self.act = act
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        if self.in_channels is not None:
            self.build(self.in_channels)
            self._built = True

        logging.info(
            "Dense  %s: %d %s" %
            (self.name, self.n_units, self.act.__name__ if self.act is not None else 'No Activation')
        )
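
The two construction modes above can be exercised as follows (a sketch; shapes are hypothetical):

import tensorlayer as tl

d1 = tl.layers.Dense(n_units=64, in_channels=128)  # built immediately, weights exist now
d2 = tl.layers.Dense(n_units=64)                   # built lazily on the first forward
                                                   # call, once the input shape is known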
Example #5
def gunzip_file(gz_path, new_path):
    """Unzips from gz_path into new_path."""
    logging.info("Unpacking %s to %s" % (gz_path, new_path))
    with gzip.open(gz_path, "rb") as gz_file:
        with open(new_path, "wb") as new_file:
            for line in gz_file:
                new_file.write(line)
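
A self-contained round-trip check for gunzip_file (assumes the function above is in scope and writes two temporary files to the current directory):

import gzip
import os

with gzip.open("demo.txt.gz", "wb") as f:
    f.write(b"hello world\n")

gunzip_file("demo.txt.gz", "demo.txt")
print(open("demo.txt", "rb").read())  # b'hello world\n'

os.remove("demo.txt.gz")
os.remove("demo.txt")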
Example #6
    def __init__(
            self,
            fw_cell,
            bw_cell,
            return_seq_2d=False,
            return_state=False,
            in_channels=None,
            name=None,  # 'birnn'
    ):
        super(BiRNN, self).__init__(name)

        self.fw_cell = fw_cell
        self.bw_cell = bw_cell
        self.return_seq_2d = return_seq_2d
        self.return_state = return_state

        if in_channels is not None:
            self.build((None, None, in_channels))
            self._built = True

        logging.info(
            "BiRNN %s: fw_cell: %s, fw_n_units: %s, bw_cell: %s, bw_n_units: %s" % (
                self.name, self.fw_cell.__class__.__name__, self.fw_cell.units, self.bw_cell.__class__.__name__,
                self.bw_cell.units
            )
        )
Example #7
def load_celebA_dataset(path='data'):
    """Load CelebA dataset

    Return a list of image path.

    Parameters
    -----------
    path : str
        The path that the data is downloaded to; defaults to ``data/celebA/``.

    """
    data_dir = 'celebA'
    filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
    save_path = os.path.join(path, filename)
    image_path = os.path.join(path, data_dir)
    if os.path.exists(image_path):
        logging.info('[*] {} already exists'.format(image_path))
    else:
        exists_or_mkdir(path)
        download_file_from_google_drive(drive_id, save_path)
        zip_dir = ''
        with zipfile.ZipFile(save_path) as zf:
            zip_dir = zf.namelist()[0]
            zf.extractall(path)
        os.remove(save_path)
        os.rename(os.path.join(path, zip_dir), image_path)

    data_files = load_file_list(path=image_path, regx='\\.jpg', printable=False)
    for i, _v in enumerate(data_files):
        data_files[i] = os.path.join(image_path, data_files[i])
    return data_files
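
Hypothetical usage: the first call downloads and unpacks the archive from Google Drive (roughly 1.3 GB), later calls only list the extracted files:

images = load_celebA_dataset(path='data')
print(len(images), images[0])  # e.g. 202599 data/celebA/000001.jpg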
Example #8
    def __init__(
            self,
            act=None,
            epsilon=1e-5,
            beta_init=tl.initializers.constant(0.0),
            gamma_init=tl.initializers.constant(1.0),
            moving_mean_init=tl.initializers.zeros(),
            # beta_init=tf.compat.v1.initializers.constant(0.0),
            # gamma_init=tf.compat.v1.initializers.constant(1.0),
            # moving_mean_init=tf.compat.v1.initializers.zeros(),
            data_format='channels_last',
            name=None,  #'switchnorm',
    ):
        # super(SwitchNorm, self).__init__(prev_layer=prev_layer, act=act, name=name)
        super().__init__(name)
        self.act = act
        self.epsilon = epsilon
        self.beta_init = beta_init
        self.gamma_init = gamma_init
        self.moving_mean_init = moving_mean_init
        self.data_format = data_format

        logging.info(
            "SwitchNorm %s: epsilon: %f act: %s" %
            (self.name, epsilon, self.act.__name__ if self.act is not None else 'No Activation')
        )
Example #9
    def __init__(self, name=None):  #'flatten'):
        super(Flatten, self).__init__(name)

        self.build()
        self._built = True

        logging.info("Flatten %s:" % (self.name))
Example #10
    def __init__(
            self,  #prev_layer,
            center=True,
            scale=True,
            act=None,
            # reuse=None,
            # variables_collections=None,
            # outputs_collections=None,
            # trainable=True,
            epsilon=1e-12,
            begin_norm_axis=1,
            begin_params_axis=-1,
            beta_init=tl.initializers.zeros(),
            gamma_init=tl.initializers.ones(),
            data_format='channels_last',
            name=None,
    ):

        # super(LayerNorm, self).__init__(prev_layer=prev_layer, act=act, name=name)
        super(LayerNorm, self).__init__(name)
        self.center = center
        self.scale = scale
        self.act = act
        self.epsilon = epsilon
        self.begin_norm_axis = begin_norm_axis
        self.begin_params_axis = begin_params_axis
        self.beta_init = beta_init
        self.gamma_init = gamma_init
        self.data_format = data_format

        logging.info(
            "LayerNorm %s: act: %s" % (self.name, self.act.__name__ if self.act is not None else 'No Activation')
        )
Example #11
    def __init__(
            self,
            n_units=100,
            act=None,
            bitW=8,
            bitA=8,
            use_gemm=False,
            W_init=tl.initializers.truncated_normal(stddev=0.1),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None,  #'quan_dense',
    ):
        super().__init__(name)
        self.n_units = n_units
        self.act = act
        self.bitW = bitW
        self.bitA = bitA
        self.use_gemm = use_gemm
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        if self.in_channels is not None:
            self.build((None, self.in_channels))
            self._built = True

        logging.info(
            "QuanDense  %s: %d %s" %
            (self.name, n_units, self.act.__name__ if self.act is not None else 'No Activation')
        )
Example #12
def load_nietzsche_dataset(path='data'):
    """Load Nietzsche dataset.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to; defaults to ``data/nietzsche/``.

    Returns
    --------
    str
        The content.

    Examples
    --------
    See ``tutorial_generate_text.py``.

    >>> words = tl.files.load_nietzsche_dataset()
    >>> words = basic_clean_str(words)
    >>> words = words.split()

    """
    logging.info("Load or Download nietzsche dataset > {}".format(path))
    path = os.path.join(path, 'nietzsche')

    filename = "nietzsche.txt"
    url = 'https://s3.amazonaws.com/text-datasets/'
    filepath = maybe_download_and_extract(filename, path, url)

    with open(filepath, "r") as f:
        words = f.read()
        return words
Example #13
    def __init__(
            self,
            offset_layer=None,
            # shape=(3, 3, 1, 100),
            n_filter=32,
            filter_size=(3, 3),
            act=None,
            padding='SAME',
            W_init=tl.initializers.truncated_normal(stddev=0.02),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None  # 'deformable_conv_2d',
    ):
        super().__init__(name)

        self.offset_layer = offset_layer
        self.n_filter = n_filter
        self.filter_size = filter_size
        self.act = act
        self.padding = padding
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        self.kernel_n = filter_size[0] * filter_size[1]
        if self.offset_layer.get_shape()[-1] != 2 * self.kernel_n:
            raise AssertionError("offset.get_shape()[-1] is not equal to: %d" % 2 * self.kernel_n)

        logging.info(
            "DeformableConv2d %s: n_filter: %d, filter_size: %s act: %s" %
            (self.name, self.n_filter, str(self.filter_size), self.act.__name__ if self.act is not None else 'No Activation')
        )
Example #14
    def __init__(self, num=None, axis=0, name=None):  #'unstack'):
        super().__init__(name)
        self.num = num
        self.axis = axis

        self.build(None)
        self._built = True
        logging.info("UnStack %s: num: %s axis: %d" % (self.name, self.num, self.axis))
Example #15
    def __init__(self, group, name=None):  #'reshape'):
        super(Shuffle, self).__init__(name)
        self.group = group

        logging.info("Shuffle %s" % (self.name))

        self.build()
        self._built = True
Example #16
    def __init__(self, shape, name=None):  #'reshape'):
        super(Reshape, self).__init__(name)
        self.shape = shape

        logging.info("Reshape %s" % (self.name))

        self.build()
        self._built = True
Example #17
    def find_top_dataset(self, dataset_name=None, sort=None, **kwargs):
        """Finds and returns a dataset from the database which matches the requirement.

        Parameters
        ----------
        dataset_name : str
            The name of dataset.
        sort : List of tuple
        sort : List of tuple
            PyMongo sort argument; search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
        kwargs : other events
            Other events, such as description, author, etc. (optional).

        Examples
        ---------
        Save dataset
        >>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')

        Get dataset
        >>> dataset = db.find_top_dataset('mnist')
        >>> datasets = db.find_datasets('mnist')

        Returns
        --------
        dataset : the dataset or False
            Returns False if nothing is found.

        """

        self._fill_project_info(kwargs)
        if dataset_name is None:
            raise Exception("dataset_name is None, please give a dataset name")
        kwargs.update({'dataset_name': dataset_name})

        s = time.time()

        d = self.db.Dataset.find_one(filter=kwargs, sort=sort)

        if d is not None:
            dataset_id = d['dataset_id']
        else:
            print("[Database] FAIL! Cannot find dataset: {}".format(kwargs))
            return False
        try:
            dataset = self._deserialization(self.dataset_fs.get(dataset_id).read())
            pc = self.db.Dataset.find(kwargs)
            print("[Database] Find one dataset SUCCESS, {} took: {}s".format(kwargs, round(time.time() - s, 2)))

            # check whether more datasets match the requirement
            dataset_id_list = pc.distinct('dataset_id')
            n_dataset = len(dataset_id_list)
            if n_dataset != 1:
                print("     Note that there are {} datasets match the requirement".format(n_dataset))
            return dataset
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.info("{}  {}  {}  {}  {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
            return False
Example #18
    def __init__(self, multiples=None, name=None):  #'tile'):

        super(Tile, self).__init__(name)
        self.multiples = multiples

        self.build((None, ))
        self._built = True

        logging.info("Tile  %s: multiples: %s" % (self.name, self.multiples))
Example #19
    def save_model(self, network=None, model_name='model', **kwargs):
        """Save model architecture and parameters into database, timestamp will be added automatically.

        Parameters
        ----------
        network : TensorLayer layer
            TensorLayer layer instance.
        model_name : str
            The name/key of model.
        kwargs : other events
            Other events, such as name, accuracy, loss, step number, etc. (optional).

        Examples
        ---------
        Save model architecture and parameters into database.
        >>> db.save_model(net, accuracy=0.8, loss=2.3, name='second_model')

        Load one model with parameters from database (run this in other script)
        >>> net = db.find_top_model(sess=sess, accuracy=0.8, loss=2.3)

        Find and load the latest model.
        >>> net = db.find_top_model(sess=sess, sort=[("time", pymongo.DESCENDING)])
        >>> net = db.find_top_model(sess=sess, sort=[("time", -1)])

        Find and load the oldest model.
        >>> net = db.find_top_model(sess=sess, sort=[("time", pymongo.ASCENDING)])
        >>> net = db.find_top_model(sess=sess, sort=[("time", 1)])

        Get model information
        >>> net._accuracy
        ... 0.8

        Returns
        ---------
        boolean : True for success, False for failure.
        """
        kwargs.update({'model_name': model_name})
        self._fill_project_info(kwargs)  # put project_name into kwargs

        params = network.get_all_params()

        s = time.time()

        kwargs.update({'architecture': network.all_graphs, 'time': datetime.utcnow()})

        try:
            params_id = self.model_fs.put(self._serialization(params))
            kwargs.update({'params_id': params_id, 'time': datetime.utcnow()})
            self.db.Model.insert_one(kwargs)
            print("[Database] Save model: SUCCESS, took: {}s".format(round(time.time() - s, 2)))
            return True
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logging.info("{}  {}  {}  {}  {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
            print("[Database] Save model: FAIL")
            return False
Example #20
    def __init__(self, perm=None, conjugate=False, name=None):  #'transpose'):
        super(Transpose, self).__init__(name)
        self.perm = perm
        self.conjugate = conjugate

        logging.info("Transpose  %s: perm: %s, conjugate: %s" % (self.name, self.perm, self.conjugate))

        self.build()
        self._built = True
Example #21
def restore_params(network, path='models'):
    logging.info("Restore pre-trained parameters")
    maybe_download_and_extract(
        'squeezenet.npz', path, 'https://github.com/tensorlayer/pretrained-models/raw/master/models/',
        expected_bytes=7405613
    )  # ls -al
    params = load_npz(name=os.path.join(path, 'squeezenet.npz'))
    assign_weights(params[:len(network.weights)], network)
    del params
Example #22
    def __init__(self, groups=32, epsilon=1e-06, act=None, data_format='channels_last', name=None):  #'groupnorm'):
        # super(GroupNorm, self).__init__(prev_layer=prev_layer, act=act, name=name)
        super().__init__(name)
        self.groups = groups
        self.epsilon = epsilon
        self.act = act
        self.data_format = data_format

        logging.info(
            "GroupNorm %s: act: %s" % (self.name, self.act.__name__ if self.act is not None else 'No Activation')
        )
Example #23
    def __init__(
            self,
            axis=1,
            name=None,  #'stack',
    ):
        super().__init__(name)
        self.axis = axis

        self.build(None)
        self._built = True
        logging.info("Stack %s: axis: %d" % (self.name, self.axis))
Example #24
    def delete_model(self, **kwargs):
        """Delete model.

        Parameters
        -----------
        kwargs : logging information
            Find items to delete, leave it empty to delete all logs.
        """
        self._fill_project_info(kwargs)
        self.db.Model.delete_many(kwargs)
        logging.info("[Database] Delete Model SUCCESS")
Example #25
    def __init__(
            self,
            init_scale=0.05,
            name='scale',
    ):
        super(Scale, self).__init__(name)
        self.init_scale = init_scale

        self.build((None, ))
        self._built = True

        logging.info("Scale  %s: init_scale: %f" % (self.name, self.init_scale))
Example #26
    def __init__(
            self,
            axis,
            name=None  # 'expand_dims',
    ):
        super(ExpandDims, self).__init__(name)
        self.axis = axis

        self.build((None, ))
        self._built = True

        logging.info("ExpandDims  %s: axis: %d" % (self.name, self.axis))
Example #27
    def __init__(
            self,
            data_format='channels_last',
            name=None  # 'globalmeanpool3d'
    ):
        super().__init__(name)
        self.data_format = data_format

        self.build()
        self._built = True

        logging.info("GlobalMeanPool3d %s" % self.name)
Example #28
    def __init__(
            self,
            concat_dim=-1,
            name=None,  #'concat',
    ):

        super(Concat, self).__init__(name)
        self.concat_dim = concat_dim

        self.build(None)
        self._built = True

        logging.info("Concat %s: concat_dim: %d" % (self.name, concat_dim))
Example #29
    def __init__(
            self,
            data_format="channels_last",
            name=None  # 'globalmaxpool1d'
    ):
        super().__init__(name)

        self.data_format = data_format

        self.build()
        self._built = True

        logging.info("GlobalMaxPool1d %s" % self.name)
Example #30
def restore_model(model, layer_type):
    logging.info("Restore pre-trained weights")
    # download weights
    maybe_download_and_extract(model_saved_name[layer_type], 'models', model_urls[layer_type])
    weights = []
    if layer_type == 'vgg16':
        npz = np.load(os.path.join('models', model_saved_name[layer_type]))
        # get weight list
        for val in sorted(npz.items()):
            logging.info("  Loading weights %s in %s" % (str(val[1].shape), val[0]))
            weights.append(val[1])
            if len(model.weights) == len(weights):
                break
    elif layer_type == 'vgg19':
        npz = np.load(os.path.join('models', model_saved_name[layer_type]), encoding='latin1').item()
        # get weight list
        for val in sorted(npz.items()):
            logging.info("  Loading %s in %s" % (str(val[1][0].shape), val[0]))
            logging.info("  Loading %s in %s" % (str(val[1][1].shape), val[0]))
            weights.extend(val[1])
            if len(model.weights) == len(weights):
                break
    # assign weight values
    assign_weights(weights, model)
    del weights
Example #31
    def __init__(
            self,
            bitW=1,
            bitA=3,
            n_filter=32,
            filter_size=(3, 3),
            strides=(1, 1),
            act=None,
            padding='SAME',
            use_gemm=False,
            data_format="channels_last",
            dilation_rate=(1, 1),
            W_init=tl.initializers.truncated_normal(stddev=0.02),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None  # 'dorefa_cnn2d',
    ):
        super().__init__(name)
        self.bitW = bitW
        self.bitA = bitA
        self.n_filter = n_filter
        self.filter_size = filter_size
        self.strides = self._strides = strides
        self.act = act
        self.padding = padding
        self.use_gemm = use_gemm
        self.data_format = data_format
        self.dilation_rate = self._dilation_rate = dilation_rate
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        if self.in_channels:
            self.build(None)
            self._built = True

        logging.info(
            "DorefaConv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s"
            % (self.name, n_filter, str(filter_size), str(strides), padding,
               self.act.__name__ if self.act is not None else 'No Activation'))

        if self.use_gemm:
            raise Exception(
                "TODO. The current version uses tf.matmul for inference.")

        if len(self.strides) != 2:
            raise ValueError("len(strides) should be 2.")
Example #32
    def __init__(
            self,
            prev_layer,
            n_filter=32,
            filter_size=(3, 3),
            out_size=(30, 30),  # remove
            strides=(2, 2),
            padding='SAME',
            batch_size=None,  # remove
            act=None,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,  # TODO: Remove when TF <1.3 not supported
            b_init_args=None,  # TODO: Remove when TF <1.3 not supported
            name='decnn2d'):
        super(DeConv2d, self).__init__(prev_layer=prev_layer,
                                       act=act,
                                       W_init_args=W_init_args,
                                       b_init_args=b_init_args,
                                       name=name)

        logging.info(
            "DeConv2d %s: n_filters: %s strides: %s pad: %s act: %s" %
            (self.name, str(n_filter), str(strides), padding,
             self.act.__name__ if self.act is not None else 'No Activation'))

        if len(strides) != 2:
            raise ValueError(
                "len(strides) should be 2, DeConv2d and DeConv2dLayer are different."
            )

        conv2d_transpose = tf.layers.Conv2DTranspose(filters=n_filter,
                                                     kernel_size=filter_size,
                                                     strides=strides,
                                                     padding=padding,
                                                     activation=self.act,
                                                     kernel_initializer=W_init,
                                                     bias_initializer=b_init,
                                                     name=name)

        self.outputs = conv2d_transpose(self.inputs)
        # new_variables = conv2d_transpose.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
Example #33
    def __init__(self,
                 groups=32,
                 epsilon=1e-06,
                 act=None,
                 data_format='channels_last',
                 name=None):  #'groupnorm'):
        # super(GroupNorm, self).__init__(prev_layer=prev_layer, act=act, name=name)
        super().__init__(name)
        self.groups = groups
        self.epsilon = epsilon
        self.act = act
        self.data_format = data_format

        logging.info(
            "GroupNorm %s: act: %s" %
            (self.name,
             self.act.__name__ if self.act is not None else 'No Activation'))
Example #34
    def __init__(
        self,
        prev_layer,
        padding,
        name='zeropad3d',
    ):
        super(ZeroPad3d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("ZeroPad3d   %s: padding: %s" % (self.name, str(padding)))

        if not isinstance(padding, (int, tuple)):
            raise AssertionError()

        self.outputs = tf.keras.layers.ZeroPadding3D(
            padding=padding, name=name)(self.inputs)  # TODO: Stop using Keras

        self._add_layers(self.outputs)
Example #35
    def __init__(
            self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last',
            name='meanpool3d'
    ):

        super(MeanPool3d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info(
            "MeanPool3d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )

        self.outputs = tf.layers.average_pooling3d(
            prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name
        )

        self._add_layers(self.outputs)
Example #36
    def __init__(self, train_or_test, name='cyclegan', path='raw_data', filename='summer2winter_yosemite'):
        self.path = os.path.join(path, name)
        self.train_or_test = train_or_test

        if folder_exists(os.path.join(path, filename)) is False:
            logging.info("[*] {} is nonexistent in {}".format(filename, path))
            filepath = maybe_download_and_extract(filename=filename + '.zip', working_directory=path,
                                                  url_source=CYCLEGAN_BASE_URL, extract=True)
            del_file(filepath)

        assert self.train_or_test in ['train', 'test']
        if self.train_or_test == 'train':
            self.im_A_path = load_file_list(path=os.path.join(path, filename, "trainA"), regx='\\.jpg', printable=False)
            self.im_B_path = load_file_list(path=os.path.join(path, filename, "trainB"), regx='\\.jpg', printable=False)
        else:
            self.im_A_path = load_file_list(path=os.path.join(path, filename, "testA"), regx='\\.jpg', printable=False)
            self.im_B_path = load_file_list(path=os.path.join(path, filename, "testB"), regx='\\.jpg', printable=False)
Example #37
    def __init__(
        self,
        out_size=(40, 40),
        in_channels=None,
        data_format='channel_last',
        name=None,
    ):
        super(SpatialTransformer2dAffine, self).__init__(name)

        self.in_channels = in_channels
        self.out_size = out_size
        self.data_format = data_format
        if self.in_channels is not None:
            self.build(self.in_channels)
            self._built = True

        logging.info("SpatialTransformer2dAffine %s" % self.name)
Example #38
    def find_min_max_kld(data):
        (P, min_data, max_data, delta) = convert_layer_output(data)
        P = smooth(P, 512)
        # find max first
        klds_max = calc_kld(P, QUANTIZE_SIZE, BINS_NUMBER, 0, 0, delta,
                            max_data, min_data)
        max_bin = min(zip(klds_max.values(), klds_max.keys()))[1]  # bin index with the smallest KLD
        klds_min = calc_kld(P, max_bin, max_bin, 0, max_bin - 1, delta,
                            max_data, min_data)
        min_bin = min(zip(klds_min.values(), klds_min.keys()))[1]

        threshold_min = min_bin * delta + min_data
        threshold_max = max_bin * delta + min_data
        logging.info('Min data (threshold_min): %f' % threshold_min)
        logging.info('Max data (threshold_max): %f' % threshold_max)

        return (threshold_min, threshold_max)
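
The helper above picks clipping thresholds that minimize the Kullback-Leibler divergence between the original activation histogram P and its quantized counterpart Q (calc_kld, smooth, and convert_layer_output are not shown in this listing). A minimal sketch of the underlying measure:

import numpy as np

def kl_divergence(p, q, eps=1e-12):
    """KL(P || Q) for two histograms; eps avoids division by and log of zero."""
    p = p / p.sum()
    q = q / q.sum()
    return float(np.sum(p * np.log((p + eps) / (q + eps))))

# find_min_max_kld effectively evaluates this for many candidate
# (min_bin, max_bin) clipping ranges and keeps the smallest result.
p = np.array([1.0, 4.0, 6.0, 4.0, 1.0])
q = np.array([1.0, 5.0, 5.0, 4.0, 1.0])
print(kl_divergence(p, q))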
Example #39
    def __init__(
            self,
            n_filter=32,
            filter_size=(3, 3),
            strides=(2, 2),
            act=None,
            padding='SAME',
            dilation_rate=(1, 1),
            data_format='channels_last',
            W_init=tl.initializers.truncated_normal(stddev=0.02),
            b_init=tl.initializers.constant(value=0.0),
            in_channels=None,
            name=None  # 'decnn2d'
    ):
        super().__init__(name)
        self.n_filter = n_filter
        self.filter_size = filter_size
        self.strides = strides
        self.padding = padding
        self.act = act
        self.data_format = data_format
        self.dilation_rate = dilation_rate
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        # Attention: To build, we need not only the in_channels!
        # if self.in_channels:
        #     self.build(None)
        #     self._built = True

        logging.info(
            "DeConv2d {}: n_filters: {} strides: {} padding: {} act: {} dilation: {}"
            .format(
                self.name,
                str(n_filter),
                str(strides),
                padding,
                self.act.__name__ if self.act is not None else 'No Activation',
                dilation_rate,
            ))

        if len(strides) != 2:
            raise ValueError(
                "len(strides) should be 2, DeConv2d and DeConv2dLayer are different."
            )
Example #40
    def __init__(self, network, idx):
        logging.info("### init Upload_Layer")
        #KLA_LINEAR = 0,KLA_RELU = 1,KLA_RELU6 = 2
        self.type = EL_K210_UPLOAD
        self.typename = "EL_K210_UPLOAD"
        layer = network.all_layers[idx]
        shape = layer._nodes[0].out_tensors[0].shape.as_list()
        if len(shape) != 4:
            raise RuntimeError("only support 4-D fc now!")
        self.width = shape[1]
        self.height = shape[2]
        self.channels = shape[3]

        self.memsize = self.width * self.height * self.channels
        self.outsize = 0
        logging.info("Upload_Layer: WxHxC=%dx%dx%d" %
                     (self.width, self.height, self.channels))
Example #41
    def __init__(
            self,
            padding=None,
            mode='CONSTANT',
            name=None,  #'pad_layer',
    ):
        # super(PadLayer, self).__init__(prev_layer=prev_layer, name=name)
        super().__init__(name)
        self.padding = padding
        self.mode = mode

        logging.info("PadLayer   %s: padding: %s mode: %s" % (self.name, list(self.padding), self.mode))

        if self.padding is None:
            raise Exception(
                "padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad"
            )
Example #42
    def __init__(
            self,
            channel_shared=False,
            a_init=tf.compat.v1.initializers.truncated_normal(mean=0.0,
                                                              stddev=0.1),
            a_init_args=None,
            name=None  # "prelu"
    ):

        # super(PRelu, self).__init__(prev_layer=prev_layer, act=tf.nn.leaky_relu, a_init_args=a_init_args, name=name)
        super().__init__(name)
        self.channel_shared = channel_shared
        self.a_init = a_init
        self.a_init_args = a_init_args

        logging.info("PRelu %s: channel_shared: %s" %
                     (self.name, self.channel_shared))
Example #43
    def __init__(self,
                 prev_layer,
                 n_filter=32,
                 filter_size=5,
                 stride=1,
                 dilation_rate=1,
                 act=None,
                 padding='SAME',
                 data_format="channels_last",
                 W_init=tf.truncated_normal_initializer(stddev=0.02),
                 b_init=tf.constant_initializer(value=0.0),
                 W_init_args=None,
                 b_init_args=None,
                 name='conv1d'):
        super(Conv1d, self).__init__(prev_layer=prev_layer,
                                     act=act,
                                     W_init_args=W_init_args,
                                     b_init_args=b_init_args,
                                     name=name)

        logging.info(
            "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s dilation_rate: %d"
            % (self.name, n_filter, filter_size, stride, padding,
               self.act.__name__ if self.act is not None else 'No Activation',
               dilation_rate))

        _conv1d = tf.layers.Conv1D(filters=n_filter,
                                   kernel_size=filter_size,
                                   strides=stride,
                                   padding=padding,
                                   data_format=data_format,
                                   dilation_rate=dilation_rate,
                                   activation=self.act,
                                   use_bias=(True if b_init else False),
                                   kernel_initializer=W_init,
                                   bias_initializer=b_init,
                                   name=name)

        # _conv1d.dtype = LayersConfig.tf_dtype   # unsupport, it will use the same dtype of inputs
        self.outputs = _conv1d(self.inputs)
        # new_variables = _conv1d.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
Example #44
    def __init__(
            self,
            combine_fn=tf.minimum,
            act=None,
            name=None,  #'elementwise',
    ):

        super(Elementwise, self).__init__(name, act=act)
        self.combine_fn = combine_fn

        self.build(None)
        self._built = True

        logging.info(
            "Elementwise %s: fn: %s act: %s" %
            (self.name, combine_fn.__name__,
             ('No Activation' if self.act is None else self.act.__name__)))
Example #45
    def train_and_validate_to_end(self, validate_step_size=50):
        """Train the model until the end of the dataset, and validate every N mini-batches.

        Parameters
        ----------
        validate_step_size : int
            Validate the training network every N steps.

        """
        while not self._sess.should_stop():
            self.train_on_batch()  # Run a training step synchronously.
            if self.global_step % validate_step_size == 0:
                # logging.info("Average loss for validation dataset: %s" % self.get_validation_metrics())
                log_str = 'step: %d, ' % self.global_step
                for n, m in self.validation_metrics:
                    log_str += '%s: %f, ' % (n.name, m)
                logging.info(log_str)
Example #46
    def __init__(self,
                 prev_layer,
                 data_format='channels_last',
                 name='globalmaxpool2d'):
        super(GlobalMaxPool2d, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("GlobalMaxPool2d %s" % self.name)

        if data_format == 'channels_last':
            self.outputs = tf.reduce_max(self.inputs, axis=[1, 2], name=name)
        elif data_format == 'channels_first':
            self.outputs = tf.reduce_max(self.inputs, axis=[2, 3], name=name)
        else:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self._add_layers(self.outputs)
Example #47
    def delete_tasks(self, **kwargs):
        """Delete tasks.

        Parameters
        -----------
        kwargs : logging information
            Find items to delete, leave it empty to delete all logs.

        Examples
        ---------
        >>> db.delete_tasks()

        """

        self._fill_project_info(kwargs)
        self.db.Task.delete_many(kwargs)
        logging.info("[Database] Delete Task SUCCESS")
Example #48
    def __init__(self,
                 prev_layer,
                 act=None,
                 shape=(5, 1, 5),
                 stride=1,
                 padding="SAME",
                 W_init=tf.truncated_normal_initializer(stddev=0.02),
                 b_init=tf.constant_initializer(value=0.0),
                 W_init_args=None,
                 b_init_args=None,
                 name="TCAConv1dLayer"
                 ):
        super(TCAConv1dLayer, self).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args,
                                             b_init_args=b_init_args, name=name)

        logging.info(
            "TCACConv1dLayer %s: shape: %s stride: %s pad: %s act: %s" % (
                self.name, str(shape), str(stride), padding,
                self.act.__name__ if self.act is not None else 'No Activation'
            )
        )

        if self.inputs.get_shape().ndims != 4:
            raise AssertionError(
                "The input must be rank-4: [batch_size, sentence, word_representation, relation_cnt]")
        self.relation_cnt = int(self.inputs.get_shape()[-1])
        self.sentence_size = int(self.inputs.get_shape()[1])
        with tf.variable_scope(name):
            self.index_layer = [IndexLayer(self.inputs, i, "IndexLayer_%d" % (i)) for i in range(self.relation_cnt)]
            self.conv_layer = [
                tl.layers.Conv1dLayer(layer, act=act, shape=shape, stride=stride, padding=padding, W_init=W_init,
                                      b_init=b_init, W_init_args=W_init_args, b_init_args=b_init_args,
                                      name="conv1d_%d" % (layer.index)) for layer in
                self.index_layer]
            self.maxpooling_layer = \
                [
                    tl.layers.MaxPool1d(layer, filter_size=self.sentence_size, strides=self.sentence_size,
                                        padding="valid", name="maxpooling1d_%d" % (i))
                    for i, layer in enumerate(self.conv_layer)
                ]
            self.outputs = tf.concat(
                [tf.expand_dims(input=tf.squeeze(layer.outputs, axis=1), axis=-1) for layer in self.maxpooling_layer],
                axis=-1)
            # (32, 1000, 19)
        self._add_layers(self.outputs)
Example #49
    def __init__(
            self,
            n_filter=32,
            filter_size=(3, 3),
            strides=(2, 2),
            act=None,
            padding='SAME',
            dilation_rate=(1, 1),
            data_format='channels_last',
            W_init=tf.compat.v1.initializers.truncated_normal(stddev=0.02),
            b_init=tf.compat.v1.initializers.constant(value=0.0),
            W_init_args=None,  # TODO: Remove when TF <1.3 not supported
            b_init_args=None,  # TODO: Remove when TF <1.3 not supported
            name=None,  #'decnn2d'
    ):
        # super(DeConv2d, self
        #      ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
        super().__init__(name)
        self.n_filter = n_filter
        self.filter_size = filter_size
        self.strides = strides
        self.padding = padding
        self.act = act
        self.data_format = data_format
        self.dilation_rate = dilation_rate
        self.W_init = W_init
        self.b_init = b_init
        self.W_init_args = W_init_args  # TODO: Remove when TF <1.3 not supported
        self.b_init_args = b_init_args  # TODO: Remove when TF <1.3 not supported

        logging.info(
            "DeConv2d {}: n_filters: {} strides: {} padding: {} act: {} dilation: {}"
            .format(
                self.name,
                str(n_filter),
                str(strides),
                padding,
                self.act.__name__ if self.act is not None else 'No Activation',
                dilation_rate,
            ))

        if len(strides) != 2:
            raise ValueError(
                "len(strides) should be 2, DeConv2d and DeConv2dLayer are different."
            )
Example #50
    def __init__(
            self,  #prev_layer,
            n_filter=32,
            filter_size=5,
            stride=1,
            dilation_rate=1,
            act=None,
            padding='SAME',
            data_format="channels_last",
            W_init=tl.initializers.truncated_normal(stddev=0.02),
            b_init=tl.initializers.constant(value=0.0),
            # W_init=tf.compat.v1.initializers.truncated_normal(stddev=0.02),
            # b_init=tf.compat.v1.initializers.constant(value=0.0),
            # W_init_args=None,
            # b_init_args=None,
            use_cudnn_on_gpu=None,
            in_channels=None,
            name=None,  #'conv1d'
    ):
        # super(Conv1d, self
        #      ).__init__(prev_layer=prev_layer, act=act, W_init_args=W_init_args, b_init_args=b_init_args, name=name)
        super().__init__(name)
        self.n_filter = n_filter
        self.filter_size = filter_size
        self.stride = stride
        self.act = act
        self.padding = padding
        self.dilation_rate = dilation_rate
        self.W_init = W_init
        self.b_init = b_init
        self.in_channels = in_channels

        if self.in_channels:
            self.build(None)
            self._built = True

        # self.W_init_args = W_init_args
        # self.b_init_args = b_init_args
        # FIXME: Don't know the use of use_cudnn_on_gpu
        self.use_cudnn_on_gpu = use_cudnn_on_gpu
        logging.info(
            "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s dilation_rate: %d"
            % (self.name, n_filter, filter_size, stride, padding,
               self.act.__name__ if self.act is not None else 'No Activation',
               dilation_rate))
Example #51
    def __init__(
            self,
            vocabulary_size,
            embedding_size,
            E_init=tl.initializers.random_uniform(-0.1, 0.1),
            name=None,  #'embedding',
    ):
        super(Embedding, self).__init__(name)
        self.vocabulary_size = vocabulary_size
        self.embedding_size = embedding_size
        self.E_init = E_init

        if not self._built:
            self.build(tuple())
            self._built = True

        logging.info("Embedding %s: (%d, %d)" %
                     (self.name, self.vocabulary_size, self.embedding_size))
Example #52
    def __init__(
            self, filter_size=(3, 3), strides=(2, 2), padding='SAME', data_format='channels_last',
            name=None, #'maxpool2d'
    ):
        if strides is None:
            strides = filter_size

        # super(MaxPool2d, self).__init__(prev_layer=prev_layer, name=name)
        super().__init__(name)
        self.filter_size = filter_size
        self.strides = strides
        self.padding = padding
        self.data_format = data_format

        logging.info(
            "MaxPool2d %s: filter_size: %s strides: %s padding: %s" %
            (self.name, str(filter_size), str(strides), str(padding))
        )
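
For reference, the output length per spatial axis follows TensorFlow's padding rules; a small sketch (matches the 'SAME' / 'VALID' conventions used above):

import math

def pool_out_len(n, filter_size, stride, padding):
    if padding.upper() == 'SAME':
        return math.ceil(n / stride)                   # input is padded as needed
    return math.floor((n - filter_size) / stride) + 1  # 'VALID': no padding

print(pool_out_len(32, 3, 2, 'SAME'))   # 16
print(pool_out_len(32, 3, 2, 'VALID'))  # 15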
Example #53
    def restore_params(self, sess):
        logging.info("Restore pre-trained parameters")
        maybe_download_and_extract(
            'vgg16_weights.npz',
            'models',
            'http://www.cs.toronto.edu/~frossard/vgg16/',
            expected_bytes=553436134)
        npz = np.load(os.path.join('models', 'vgg16_weights.npz'))

        params = []
        for val in sorted(npz.items()):
            logging.info("  Loading params %s" % str(val[1].shape))
            params.append(val[1])
            if len(self.all_params) == len(params):
                break

        assign_params(sess, params, self.net)
        del params
Example #54
def restore_params(network, model_path='models.npz'):
    logging.info("Restore pre-trained weights")

    try:
        npz = np.load(model_path, allow_pickle=True)
    except Exception:
        print("Download the model file and place it in the /model folder")
        # The password string and the path opened below were redacted
        # ("******") in the source listing; placeholders are used here.
        print("Weights download: ", weights_url['link'], "password:", "<redacted>")
        return

    f = open("model/weights_list.txt", "r")  # placeholder for the redacted file name
    line = f.readlines()
    for i in range(len(line)):
        network.all_weights[i].assign(npz[line[i].strip()])
        logging.info(
            "  Loading weights %s in %s" %
            (network.all_weights[i].shape, network.all_weights[i].name))
Example #55
    def __init__(
            self,
            mean=0.0,
            stddev=1.0,
            is_train=True,
            seed=None,
            name=None,  # 'gaussian_noise',
    ):
        super().__init__(name)
        self.mean = mean
        self.stddev = stddev
        self.seed = seed
        self.is_train = is_train

        self.build()
        self._built = True

        logging.info("GaussianNoise %s: mean: %f stddev: %f" % (self.name, self.mean, self.stddev))
Example #56
    def __init__(
            self,
            # inputs,
            vocabulary_size=80000,
            embedding_size=200,
            E_init=tf.random_uniform_initializer(-0.1, 0.1),
            E_init_args=None,
            name=None,  #'embedding',
    ):
        # super(EmbeddingInput, self).__init__(prev_layer=inputs, E_init_args=E_init_args, name=name)
        super().__init__(name)
        self.vocabulary_size = vocabulary_size
        self.embedding_size = embedding_size
        self.E_init = E_init
        self.E_init_args = E_init_args

        logging.info("EmbeddingInput %s: (%d, %d)" %
                     (self.name, self.vocabulary_size, self.embedding_size))
Example #57
    def __init__(
            self,
            prev_layer,
            padding=None,
            mode='CONSTANT',
            name='pad_layer',
    ):
        super(PadLayer, self).__init__(prev_layer=prev_layer, name=name)

        logging.info("PadLayer   %s: padding: %s mode: %s" % (self.name, list(padding), mode))

        if not isinstance(padding, tf.Tensor):
            raise Exception(
                "padding should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad"
            )

        self.outputs = tf.pad(self.inputs, paddings=padding, mode=mode, name=name)
        self._add_layers(self.outputs)
Example #58
    def __init__(
            self,
            depth_radius=None,
            bias=None,
            alpha=None,
            beta=None,
            name=None,  #'lrn',
    ):
        # super(LocalResponseNorm, self).__init__(prev_layer=prev_layer, name=name)
        super().__init__(name)
        self.depth_radius = depth_radius
        self.bias = bias
        self.alpha = alpha
        self.beta = beta

        logging.info(
            "LocalResponseNorm %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s"
            % (self.name, str(depth_radius), str(bias), str(alpha), str(beta)))
Example #59
    def __init__(
            self,
            prev_layer,
            act=None,
            epsilon=1e-5,
            beta_init=tf.constant_initializer(0.0),
            gamma_init=tf.constant_initializer(1.0),
            moving_mean_init=tf.zeros_initializer(),
            name='switchnorm_layer',
    ):
        super(SwitchNormLayer, self).__init__(prev_layer=prev_layer, act=act, name=name)

        logging.info(
            "SwitchNormLayer %s: epsilon: %f act: %s" %
            (self.name, epsilon, self.act.__name__ if self.act is not None else 'No Activation')
        )

        with tf.variable_scope(name):
            x = self.inputs
            ch = x.shape[-1]

            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
            ins_mean, ins_var = tf.nn.moments(x, [1, 2], keep_dims=True)
            layer_mean, layer_var = tf.nn.moments(x, [1, 2, 3], keep_dims=True)

            gamma = tf.get_variable("gamma", [ch], initializer=gamma_init)
            beta = tf.get_variable("beta", [ch], initializer=beta_init)

            mean_weight_var = tf.get_variable("mean_weight", [3], initializer=tf.constant_initializer(1.0))
            var_weight_var = tf.get_variable("var_weight", [3], initializer=tf.constant_initializer(1.0))

            mean_weight = tf.nn.softmax(mean_weight_var)
            var_weight = tf.nn.softmax(var_weight_var)

            mean = mean_weight[0] * batch_mean + mean_weight[1] * ins_mean + mean_weight[2] * layer_mean
            var = var_weight[0] * batch_var + var_weight[1] * ins_var + var_weight[2] * layer_var

            x = (x - mean) / (tf.sqrt(var + epsilon))
            self.outputs = x * gamma + beta
            self.outputs = self._apply_activation(self.outputs)

        self._add_layers(self.outputs)
        self._add_params([beta, gamma, mean_weight_var, var_weight_var])
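
A NumPy sketch of the statistic mixing above (NHWC layout, hypothetical shapes): switchable normalization blends batch, instance, and layer statistics with softmax-learned weights; here the mixing weights are fixed to uniform, which is what the constant-1.0 initializers above produce after the softmax:

import numpy as np

x = np.random.randn(8, 16, 16, 32).astype(np.float32)
stats = {
    'batch': (x.mean(axis=(0, 1, 2), keepdims=True), x.var(axis=(0, 1, 2), keepdims=True)),
    'instance': (x.mean(axis=(1, 2), keepdims=True), x.var(axis=(1, 2), keepdims=True)),
    'layer': (x.mean(axis=(1, 2, 3), keepdims=True), x.var(axis=(1, 2, 3), keepdims=True)),
}
w = np.array([1 / 3, 1 / 3, 1 / 3])  # softmax of three equal logits
mean = sum(wi * m for wi, (m, _) in zip(w, stats.values()))
var = sum(wi * v for wi, (_, v) in zip(w, stats.values()))
x_hat = (x - mean) / np.sqrt(var + 1e-5)
print(x_hat.shape)  # (8, 16, 16, 32)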
Example #60
def get_variables_with_name(name=None, train_only=True, verbose=False):
    """Get a list of TensorFlow variables by a given name scope.

    Parameters
    ----------
    name : str
        Get the variables that contain this name.
    train_only : boolean
        If True, only get the trainable variables.
    verbose : boolean
        If True, print the information of all variables.

    Returns
    -------
    list of Tensor
        A list of TensorFlow variables

    Examples
    --------
    >>> import tensorlayer as tl
    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)

    """
    if name is None:
        raise Exception("please input a name")

    logging.info("  [*] geting variables with %s" % name)

    # tvar = tf.trainable_variables() if train_only else tf.all_variables()
    if train_only:
        t_vars = tf.trainable_variables()

    else:
        t_vars = tf.global_variables()

    d_vars = [var for var in t_vars if name in var.name]

    if verbose:
        for idx, v in enumerate(d_vars):
            logging.info("  got {:3}: {:15}   {}".format(
                idx, v.name, str(v.get_shape())))

    return d_vars