Example 1
0
    def __init__(self,
                 n_components,
                 opu=None,
                 ndims=1,
                 n_2d_features=None,
                 packed=False,
                 simulated=False,
                 max_n_features=None,
                 verbose_level=-1,
                 linear=False):
        # Honor an explicit verbosity request, then record the effective level.
        if verbose_level >= 0:
            lightonml.set_verbose_level(verbose_level)
        self.verbose_level = lightonml.get_verbose_level()
        super(OPUMap, self).__init__()

        if opu is not None:
            # Caller supplied an OPU: adopt it and sync its output dimension.
            self.opu = opu
            self.opu.n_components = n_components
            device_is_simulated = isinstance(opu.device, SimulatedOpuDevice)
            if simulated and not device_is_simulated:
                warnings.warn("You provided a real OPU object but set simulated=True."
                              " Will use the real OPU.")
            if device_is_simulated and not simulated:
                warnings.warn("You provided a simulated OPU object but set simulated=False. "
                              "Will use simulated OPU.")
        elif simulated:
            simulated_opu = SimulatedOpuDevice()
            # A simulated device has no hardware to query, so the caller
            # must state the maximum input size up front.
            if max_n_features is None:
                raise ValueError(
                    "When using simulated=True, you need to provide max_n_features."
                )
            self.opu = OPU(opu_device=simulated_opu,
                           max_n_features=max_n_features,
                           n_components=n_components)
        else:
            self.opu = OPU(n_components=n_components)

        self.n_components = self.opu.n_components
        if ndims not in (1, 2):
            raise ValueError("Number of input dimensions must be 1 or 2")
        self.ndims = ndims
        self.n_2d_features = n_2d_features
        self.packed = packed
        self.simulated = simulated
        self.linear = linear
        self.max_n_features = max_n_features

        self.fitted = False
        self.online = False
        if lightonml.get_verbose_level() >= 1:
            print("OPU output is detached from the computational graph.")
Example 2
0
    def __init__(self,
                 n_components,
                 opu=None,
                 ndims=1,
                 n_2d_features=None,
                 packed=False,
                 simulated=False,
                 max_n_features=None,
                 verbose_level=-1,
                 linear=False):
        # verbose_level is deprecated but kept as an attribute so the
        # estimator stays sklearn-compliant.
        if verbose_level >= 0:
            lightonml.set_verbose_level(verbose_level)
        self.verbose_level = lightonml.get_verbose_level()

        if opu is not None:
            # Caller supplied an OPU: adopt it and sync its output dimension.
            self.opu = opu
            self.opu.n_components = n_components
            device_is_simulated = isinstance(opu.device, SimulatedOpuDevice)
            if simulated and not device_is_simulated:
                warnings.warn("You provided a real OPU object but set simulated=True."
                              " Will use the real OPU.")
            if device_is_simulated and not simulated:
                warnings.warn("You provided a simulated OPU object but set simulated=False."
                              " Will use simulated OPU.")
        elif simulated:
            simulated_opu_device = SimulatedOpuDevice()
            # A simulated device has no hardware to query, so the caller
            # must state the maximum input size up front.
            if max_n_features is None:
                raise ValueError(
                    "When using simulated=True, you need to provide max_n_features."
                )
            self.opu = OPU(opu_device=simulated_opu_device,
                           max_n_features=max_n_features,
                           n_components=n_components)
        else:
            self.opu = OPU(n_components=n_components)

        if ndims not in (1, 2):
            raise ValueError("Number of input dimensions must be 1 or 2")
        self.ndims = ndims
        self.n_2d_features = n_2d_features
        self.packed = packed
        self.simulated = simulated
        self.linear = linear
        self.max_n_features = max_n_features
        self.fitted = False
Example 3
0
def transform(n_images,
              n_features,
              n_components=0,
              disable_pbar=False,
              linear=False,
              config_file=""):
    """Time `n_images` OPU transforms and print the resulting throughput."""
    opu = OPU(disable_pbar=disable_pbar,
              open_at_init=False,
              config_file=config_file)
    # 0 means "keep the OPU's default output dimension".
    if n_components != 0:
        opu.n_components = n_components
    ins = np.ones((n_images, n_features), dtype=np.uint8)

    with opu:
        print(opu.version())
        start = time.time()
        opu.fit1d(ins)
        run = opu.linear_transform if linear else opu.transform
        run(ins)
        elapsed = time.time() - start
        print(
            f"{n_images} transforms in {elapsed:.2f} s ({n_images / elapsed:.2f} Hz)"
        )
Example 4
0
def transform(n_images, n_features, n_components=0, disable_pbar=False):
    """Time `n_images` 1D fit+transform operations on the OPU.

    Parameters
    ----------
    n_images: int,
        number of input vectors to transform.
    n_features: int,
        size of each input vector.
    n_components: int,
        output dimension; 0 keeps the OPU's default.
    disable_pbar: bool,
        disable the OPU progress bar when True.
    """
    opu = OPU(disable_pbar=disable_pbar, open_at_init=False)
    # 0 means "keep the OPU's default output dimension".
    if n_components != 0:
        opu.n_components = n_components  # was mis-indented (3 spaces) in the original
    ins = np.ones((n_images, n_features), dtype=np.uint8)

    with opu:
        print(opu.version())
        begin = time.time()
        opu.fit_transform1d(ins)
        elapsed = time.time() - begin
        # f-string produces exactly the same text as the old
        # "{:d} transforms in {:.2f} s ({:.2f} Hz)".format(...) call.
        print(f"{n_images:d} transforms in {elapsed:.2f} s "
              f"({n_images / elapsed:.2f} Hz)")
Example 5
0
def main():
    """Run a small suite of OPU throughput benchmarks."""
    print("LightOn OPU version ", lgopu_version)
    parser = argparse.ArgumentParser()
    parser.add_argument("-n",
                        "--nbimages",
                        type=int,
                        help="number of images",
                        default=3000)
    n_images = parser.parse_args().nbimages

    with OPU() as opu:
        # Pre-packed input of the device's full size: no formatting needed.
        print("transforms without formatting")
        ones = np.ones((n_images, opu.max_n_features), dtype=np.uint8)
        packed_ones = np.packbits(ones, axis=1)
        with benchmark(n_images):
            opu.fit_transform1d(packed_ones, packed=True)

        n_features = 1000
        print("1D linear transforms with formatting")
        ones = np.ones((n_images, n_features), dtype=np.uint8)
        with benchmark(n_images):
            opu.fit1d(ones)
            opu.linear_transform(ones)

        print("1D transforms with formatting")
        ones = np.ones((n_images, n_features), dtype=np.uint8)
        with benchmark(n_images):
            opu.fit_transform1d(ones)

        # Online mode: one vector per call.
        print("Online transform")
        n_online = 1000
        with benchmark(n_online):
            opu.fit1d(n_features=n_features, online=True)
            for _ in range(n_online):
                opu.transform(ones[0])
Example 6
0
    # Of course, the equivalent for packed input of input device's size
    # will work the same way

    # It can also be a batch of them
    many_raw_2d = random_bin((100, ) + input_shape)
    out_many_raw_2d = opu.fit_transform2d(many_raw_2d)
    print("Many raw out shape", out_many_raw_2d.shape)   # (100, opu.n_components)

    # The online mode allows you to run accelerate the run of single vectors:
    n_features1d = 1200
    opu.fit1d(n_features=n_features1d, online=True)
    for _ in range(10):
        online_out = opu.transform(random_bin(n_features1d))
    print("Online out shape", online_out.shape)

    n_features2d = (50, 50)
    opu.fit2d(n_features=n_features2d, online=True)
    for _ in range(10):
        online_out = opu.transform(random_bin(n_features2d))
    print("Online out shape", online_out.shape)


def random_bin(shape):
    """Return a uniformly random boolean (0/1) array of the given shape."""
    bits = np.random.randint(0, 2, size=shape, dtype=bool)
    return bits


if __name__ == '__main__':
    # Open the default OPU and walk through the tutorial with it.
    device = OPU()
    opu_tutorial(device)