Example #1
def test_parafac2_normalize_factors():
    rng = check_random_state(1234)
    rank = 2  # Rank 2 so we only need to test rank of minimum and maximum

    random_parafac2_tensor = random_parafac2(
        shapes=[(15 + rng.randint(5), 30) for _ in range(25)],
        rank=rank,
        random_state=rng,
    )
    random_parafac2_tensor.factors[0] = random_parafac2_tensor.factors[0] + 0.1
    norms = tl.ones(rank)
    for factor in random_parafac2_tensor.factors:
        norms = norms * tl.norm(factor, axis=0)

    slices = parafac2_to_tensor(random_parafac2_tensor)

    unnormalized_rec = parafac2(slices,
                                rank,
                                random_state=rng,
                                normalize_factors=False)
    assert unnormalized_rec.weights[0] == 1

    normalized_rec = parafac2(slices,
                              rank,
                              random_state=rng,
                              normalize_factors=True,
                              n_iter_max=1000)
    assert tl.max(tl.abs(T.norm(normalized_rec.factors[0], axis=0) - 1)) < 1e-5
    assert abs(tl.max(norms) -
               tl.max(normalized_rec.weights)) / tl.max(norms) < 1e-2
    assert abs(tl.min(norms) -
               tl.min(normalized_rec.weights)) / tl.min(norms) < 1e-2
Example #2
def test_clip():
    # Test that clip can work with single arguments
    X = T.tensor([0.0, -1.0, 1.0])
    X_low = T.tensor([0.0, 0.0, 1.0])
    X_high = T.tensor([0.0, -1.0, 0.0])
    assert_array_equal(tl.clip(X, a_min=0.0), X_low)
    assert_array_equal(tl.clip(X, a_max=0.0), X_high)

    # More extensive test with a larger random tensor
    rng = tl.check_random_state(0)
    tensor = tl.tensor(rng.random_sample((10, 10, 10)).astype('float32'))

    val1 = np.float32(rng.random_sample())
    val2 = np.float32(rng.random_sample())
    limits = [(min(val1, val2), max(val1, val2)), (-1, 2),
              (tl.max(tensor) + 1, None), (None, tl.min(tensor) - 1),
              (tl.max(tensor), None), (tl.min(tensor), None),
              (None, tl.max(tensor)), (None, tl.min(tensor))]

    for min_val, max_val in limits:
        message = f"Tensor clipped incorrectly with min_val={min_val} and max_val={max_val}. Tensor bounds are ({tl.to_numpy(tl.min(tensor))}, {tl.to_numpy(tl.max(tensor))}"
        if min_val is not None:
            assert tl.all(tl.clip(tensor, min_val, None) >= min_val), message
            assert tl.all(
                tl.clip(tensor, min_val, max_val) >= min_val), message
        if max_val is not None:
            assert tl.all(tl.clip(tensor, None, max_val) <= max_val), message
            assert tl.all(
                tl.clip(tensor, min_val, max_val) <= max_val), message
Example #3
def print_weight(model_path):
    model = load_model(model_path)

    conv_norm = list()
    conv_max = list()
    conv_min = list()

    depconv_norm = list()
    depconv_max = list()
    depconv_min = list()

    for layer in model.layers:
        print(layer.name)
        if layer.get_weights() and type(layer).__name__ != 'DepthwiseConv2D':
            conv, _ = layer.get_weights()
            conv_norm.append(tt.norm(conv))
            conv_max.append(tt.max(conv))
            conv_min.append(tt.min(conv))

#            plt.figure(figsize=(10,10))
#            plt.imshow(my_unfold(conv, -1))
#            plt.colorbar()
#            plt.show()
#            plt.clf()

        elif layer.get_weights() and type(layer).__name__ == 'DepthwiseConv2D':
            w = layer.get_weights()
            depconv_norm.append(tt.norm(w[0]))
            depconv_max.append(tt.max(w[0]))
            depconv_min.append(tt.min(w[0]))


#            plt.figure(figsize=(20,20))
#            plt.imshow(my_unfold(w[0], -1))
#            plt.colorbar()
#            plt.show()
#            plt.clf()

    conv_info = [conv_norm, conv_max, conv_min]
    depconv_info = [depconv_norm, depconv_max, depconv_min]

    return conv_info, depconv_info
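An illustrative call (assuming Keras' load_model and a tensorly-style `tt` alias are imported in the surrounding file; the model path below is hypothetical):

conv_info, depconv_info = print_weight('models/mobilenet.h5')  # hypothetical path
conv_norms, conv_maxs, conv_mins = conv_info
print(max(conv_maxs), min(conv_mins))   # overall range of standard convolution weights
print(len(depconv_info[0]))             # number of DepthwiseConv2D layers inspected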
Example #4
    def test_R2X(self):
        """Test to ensure R2X for higher components is larger."""
        tensor = tl.tensor(np.random.rand(12, 10, 15))
        arr = [find_R2X(tensor, perform_decomposition(tensor, i)) for i in range(1, 8)]

        for j in range(len(arr) - 1):
            self.assertTrue(arr[j] < arr[j + 1])

        # confirm R2X is >= 0 and <=1
        self.assertGreaterEqual(tl.min(arr), 0)
        self.assertLessEqual(tl.max(arr), 1)
Example #5
def tl_sample_uniform(tensor, nsamp):
    """Uniformly sample 'nsamp' indices from a tensor 'tensor' along with
    corresponding values and the weight of the sample.

    Parameters
    ----------
    tensor : ndarray
      Dense tensor
    nsamp : integer
      number of samples

    Returns
    -------
    subs : ndarray
        Subscripts (indices)
    vals : ndarray
        Values
    wgts : ndarray
        Weights
    """
    d = tl.ndim(tensor)
    shp = tl.shape(tensor)
    tsz = 1
    for i in shp:
        tsz *= i

    # generate subscripts
    subSamp = lambda x, y: np.ceil(x * y)
    subs = subSamp(np.random.rand(nsamp, d), shp)
    subs = subs.astype(int) - 1  # adjust for zero-indexing

    # quick check that indices are in bounds
    if tl.min(subs) < 0:
        print("Bad subscripts generated for sampling.")
        sys.exit(1)

    # capture corresponding values for subscripts
    vals = []
    for i in subs:
        index = tuple(i.tolist())
        vals.append(tensor[index])
    vals = tl.reshape(tl.tensor(vals), (len(vals), 1))

    # calculate weights for sample
    wgts = tsz / nsamp * tl.ones((nsamp, 1))

    return subs, vals, wgts
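A minimal usage sketch (assuming the NumPy backend; the small tensor and sample count are made up for illustration):

import numpy as np
import tensorly as tl

tensor = tl.tensor(np.arange(24, dtype=float).reshape(2, 3, 4))
subs, vals, wgts = tl_sample_uniform(tensor, nsamp=5)
print(subs.shape)   # (5, 3): one (i, j, k) index per sample
print(vals.shape)   # (5, 1): tensor value at each sampled index
print(wgts[0])      # each weight equals tsz / nsamp = 24 / 5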
Example #6
def active_set_nnls(Utm, UtU, x=None, n_iter_max=100, tol=10e-8):
    """
     Active set algorithm for a non-negative least squares solution.

     Computes an approximate non-negative solution for the Ux = m linear system.

     Parameters
     ----------
     Utm : vectorized ndarray
        Pre-computed product of the transpose of U and m
     UtU : ndarray
        Pre-computed Kronecker product of the transpose of U and U
     x : ndarray, optional
        Initial solution. Default: None
     n_iter_max : int
         Maximum number of iteration
         Default: 100
     tol : float
         Early stopping criterion

     Returns
     -------
     x : ndarray

     Notes
     -----
     This function solves the following problem:

     .. math::
        \\begin{equation}
             \\min_{x} ||Ux - m||^2
        \\end{equation}

     According to [1], the non-negativity-constrained least squares algorithm
     works with the quantity:

     .. math::
        \\begin{equation}
             x' = U^T m - (U^T U) x
        \\end{equation}

     References
     ----------
     [1] : Bro, R., & De Jong, S. (1997). A fast non‐negativity‐constrained
           least squares algorithm. Journal of Chemometrics: A Journal of
           the Chemometrics Society, 11(5), 393-401.
     """
    if tl.get_backend() == 'tensorflow':
        raise ValueError(
            "Active set is not supported with the tensorflow backend. Consider using fista method with tensorflow."
        )

    if x is None:
        x_vec = tl.zeros(tl.shape(UtU)[1], **tl.context(UtU))
    else:
        x_vec = tl.base.tensor_to_vec(x)

    x_gradient = Utm - tl.dot(UtU, x_vec)
    passive_set = x_vec > 0
    active_set = x_vec <= 0
    support_vec = tl.zeros(tl.shape(x_vec), **tl.context(x_vec))

    for iteration in range(n_iter_max):

        if iteration > 0 or tl.all(x_vec == 0):
            indice = tl.argmax(x_gradient)
            passive_set = tl.index_update(passive_set, tl.index[indice], True)
            active_set = tl.index_update(active_set, tl.index[indice], False)
        # To avoid singularity error when initial x exists
        try:
            passive_solution = tl.solve(UtU[passive_set, :][:, passive_set],
                                        Utm[passive_set])
            indice_list = []
            for i in range(tl.shape(support_vec)[0]):
                if passive_set[i]:
                    indice_list.append(i)
                    support_vec = tl.index_update(
                        support_vec, tl.index[int(i)],
                        passive_solution[len(indice_list) - 1])
                else:
                    support_vec = tl.index_update(support_vec,
                                                  tl.index[int(i)], 0)
        # Start from zeros if solve is not achieved
        except:
            x_vec = tl.zeros(tl.shape(UtU)[1])
            support_vec = tl.zeros(tl.shape(x_vec), **tl.context(x_vec))
            passive_set = x_vec > 0
            active_set = x_vec <= 0
            if tl.any(active_set):
                indice = tl.argmax(x_gradient)
                passive_set = tl.index_update(passive_set, tl.index[indice],
                                              True)
                active_set = tl.index_update(active_set, tl.index[indice],
                                             False)
            passive_solution = tl.solve(UtU[passive_set, :][:, passive_set],
                                        Utm[passive_set])
            indice_list = []
            for i in range(tl.shape(support_vec)[0]):
                if passive_set[i]:
                    indice_list.append(i)
                    support_vec = tl.index_update(
                        support_vec, tl.index[int(i)],
                        passive_solution[len(indice_list) - 1])
                else:
                    support_vec = tl.index_update(support_vec,
                                                  tl.index[int(i)], 0)

        # update support vector if it is necessary
        if tl.min(support_vec[passive_set]) <= 0:
            for i in range(len(passive_set)):
                alpha = tl.min(
                    x_vec[passive_set][support_vec[passive_set] <= 0] /
                    (x_vec[passive_set][support_vec[passive_set] <= 0] -
                     support_vec[passive_set][support_vec[passive_set] <= 0]))
                update = alpha * (support_vec - x_vec)
                x_vec = x_vec + update
                passive_set = x_vec > 0
                active_set = x_vec <= 0
                passive_solution = tl.solve(
                    UtU[passive_set, :][:, passive_set], Utm[passive_set])
                indice_list = []
                for i in range(tl.shape(support_vec)[0]):
                    if passive_set[i]:
                        indice_list.append(i)
                        support_vec = tl.index_update(
                            support_vec, tl.index[int(i)],
                            passive_solution[len(indice_list) - 1])
                    else:
                        support_vec = tl.index_update(support_vec,
                                                      tl.index[int(i)], 0)

                if tl.any(passive_set) != True or tl.min(
                        support_vec[passive_set]) > 0:
                    break
        # set x to s
        x_vec = tl.clip(support_vec, 0, tl.max(support_vec))

        # gradient update
        x_gradient = Utm - tl.dot(UtU, x_vec)

        if tl.any(active_set) != True or tl.max(x_gradient[active_set]) <= tol:
            break

    return x_vec
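A minimal usage sketch (assuming the NumPy backend; U and m below are illustrative):

import numpy as np
import tensorly as tl

U = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
m = np.array([1.0, 2.0, 3.0])
UtU = tl.tensor(U.T @ U)   # pre-computed U^T U
Utm = tl.tensor(U.T @ m)   # pre-computed U^T m
x = active_set_nnls(Utm, UtU)   # non-negative solution of min ||Ux - m||^2, here [1., 2.]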
Example #7
def gcp(X, R, type='normal', func=None, grad=None, lower=None,\
        opt='lbfgsb', mask=None, maxiters=1000, \
        init='random', printitn=10, state=None, factr=1e7, pgtol=1e-4, \
        fsamp=None, gsamp=None, oversample=1.1, sampler='uniform', \
        fsampler=None, rate=1e-3, decay=0.1, maxfails=1, epciters=1000, \
        festtol=-math.inf, beta1=0.9, beta2=0.999, epsilon=1e-8):
    """Generalized CANDECOMP/PARAFAC (GCP) decomposition via all-at-once optimization (OPT) [1]
    Computes a rank-'R' decomposition of 'tensor' such that::

      tensor = [|weights; factors[0], ..., factors[-1] |].

    GCP-OPT allows the use of a variety of statistically motivated loss functions
    suited to the data held in a tensor (e.g. continuous, discrete, binary, etc.)

    Parameters
    ----------
    X : ndarray
        Tensor to factorize
        **COMING SOON**
        Sparse tensor support
    R : int
        Rank of decomposition (Number of components).
    type : str,
        Type of objective function used
        Options include:
            'normal' or 'gaussian'          - Gaussian for real-valued data (DEFAULT)
            'binary' or 'bernoulli-odds'    - Bernoulli w/ odds link for binary data
            'bernoulli-logit'               - Bernoulli w/ logit link for binary data
            'count' or 'poisson'            - Poisson for count data
            'poisson-log'                   - Poisson w/ log link for count data
            'rayleigh'                      - Rayleigh distribution for real-valued data
            'gamma'                         - Gamma distribution for non-negative real-valued data
        **COMING SOON**:
            'huber (DELTA)'                 - Similar to Gaussian, for real-valued data
            'negative-binomial (r)'         - Negative binomial for count data
            'beta (BETA)'                   - Beta divergence for non-negative real-valued data
            'user-specified'                - Customized objective function provided by user
    func: lambda function
        User-specified custom objective function, e.g. lambda x, m: (m-x)**2
    grad: lambda function
        User-specified custom gradient function, e.g. lambda x, m: 2*(m-x)
    lower: 0 or -inf
        Lower bound for custom objective/gradient
    opt : str
        Optimization method
        Options include:
            'lbfgsb'    - Bound-constrained limited-memory BFGS
            'sgd'       - Stochastic gradient descent (SGD)
            **COMING SOON**
            'adam'      - Momentum-based SGD method
            'adagrad'   - Adaptive gradient algorithm, well suited for sparse data
        If 'tensor' is dense, all 4 options can be used, 'lbfgsb' by default.
        **COMING SOON** - Sparse format support
        If 'tensor' is sparse, only 'sgd', 'adam' and 'adagrad' can be used, 'adam' by
        default.
        Each method has specific parameters, see documentation
    mask : ndarray
        Specifies a mask, 0's for missing/incomplete entries, 1's elsewhere, with
        the same shape as 'tensor'.
        **COMING SOON** - Missing/incomplete data simulation.
    maxiters : int
        Maximum number of outer iterations, 1000 by default.
    init : {'random', 'svd', cptensor}
        Initialization for factor matrices, 'random' by default.
        Options include:
            'random'    - random initialization from a uniform distribution on [0,1)
            'svd'       - initialize the `m`th factor matrix using the `rank` left
                          singular vectors of the `m`th unfolding of the input tensor.
            cptensor    - initialization provided by user.  NOTE: weights are pulled
                          in the last factor and then the weights are set to "1" for
                          the output tensor.
        Initializations all result in a cptensor where the weights are one.
    printitn : int
        Print every n iterations; 0 for no printing, 10 by default.
    state : {None, int, np.random.RandomState}
        Seed for reproducible random number generation
    factr : float
        (L-BFGS-B parameter)
        Tolerance on the change of objective values. Defaults to 1e7.
    pgtol : float
        (L-BFGS-B parameter)
        Projected gradient tolerance. Defaults to 1e-4
    sampler : {uniform, stratified, semi-stratified}
        Type of sampling to use for stochastic gradient (SGD/ADAM/ADAGRAD).
        Defaults to 'uniform' for dense tensors.
        Defaults to 'stratified' for sparse tensors.
        Options include:
            'uniform'           - Uniform random sampling
            **COMING SOON**
            'stratified'        - Stratified sampling, targets sparse data. Zero and
                                  nonzero values sampled separately.
            'semi-stratified'   - Similar to stratified sampling, but is more
                                  computationally efficient (See papers referenced).
    gsamp : int
        Number of samples for stochastic gradient (SGD/ADAM/ADAGRAD parameter).
        Generally set to be O(sum(shape)*R).
        **COMING SOON**
        For stratified or semi-stratified, this may be two numbers:
            - the number of nnz samples
            - the number of zero samples.
        If only one number is specified, then this value is used for both nnzs and
        zeros (total number of samples is 2x specified value in this case).
    fsampler : {'uniform', 'stratified', custom}
        Type of sampling for estimating objective function (SGD/ADAM/ADAGRAD parameter).
        Options include:
            'uniform'       - Uniform random sampling
            **COMING SOON**
            'stratified'    - Stratified sampling, targets sparse data. Zero and
                              nonzero values sampled separately.
            custom          - User-defined sampler (lambda function). Custom option
                              is primarily useful in reusing sampled elements across
                              multiple tests.
    fsamp : int
        (SGD/ADAM/ADAGRAD parameter)
        Number of samples to estimate objective function.
        This should generally be somewhat large since we want this sample to generate a
        reliable estimate of the true function value.
    oversample : float
        (Stratified sampling parameter)
        Factor to oversample when implicitly sampling zeros in the sparse case.
        Defaults to 1.1. Only adjust for very small tensors.
    rate : float
        (SGD/ADAM parameter)
        Initial learning rate. Defaults to 1e-3.
    decay : float
        (SGD/ADAM parameter)
        Amount to decrease learning rate when progress stagnates, i.e. no change in
        objective function between epochs.  Defaults to 0.1.
    maxfails : int
        (SGD/ADAM parameter)
        Number of times to decrease the learning rate.
        Defaults to 1, may be set to zero.
    epciters : int
        (SGD/ADAM parameter)
        Iterations per epoch. Defaults to 1000.
    festtol : float
        (SGD/ADAM parameter)
        Quit estimation of function if it goes below this level.
        Defaults to -inf.
    beta1 : float
        (ADAM parameter)    - generally doesn't need to be changed
        Defaults to 0.9
    beta2 : float
        (ADAM parameter)    - generally doesn't need to be changed
        Defaults to 0.999
    epsilon : float
        (ADAM parameter)    - generally doesn't need to be changed
        Defaults to 1e-8

    Returns
    -------
    Mfin : CPTensor
        Canonical polyadic decomposition of input tensor X

    References
    ----------
    [1] D. Hong, T. G. Kolda, J. A. Duersch, Generalized Canonical
        Polyadic Tensor Decomposition, SIAM Review, 62:133-163, 2020,
        https://doi.org/10.1137/18M1203626
    [2] T. G. Kolda, D. Hong, Stochastic Gradients for Large-Scale Tensor
        Decomposition. SIAM J. Mathematics of Data Science, 2:1066-1095,
        2020, https://doi.org/10.1137/19m1266265

    """
    # Timer - Setup (outside optimization)
    start_setup0 = time.perf_counter()

    # Initial setup
    nd = tl.ndim(X)
    sz = tl.shape(X)
    tsz = X.size
    X_context = tl.context(X)
    vecsz = 0
    for i in range(nd):
        # tsz *= sz[i]
        vecsz += sz[i]
    vecsz *= R
    W = mask

    # Random set-up
    if state is not None:
        state = tl.check_random_state(state)

    # capture stats(nnzs, zeros, missing)
    nnonnzeros = 0
    X = tl.tensor_to_vec(X)
    for i in X:
        if i > 0:
            nnonnzeros += 1
    X = tl.reshape(X, sz)
    nzeros = tsz - nnonnzeros
    nmissing = 0
    if W is not None:
        W = tl.tensor_to_vec(W)
        for i in range(tl.shape(W)[0]):
            if W[i] > 0: nmissing += 1  # TODO: is this right??
        W = tl.reshape(W, sz)

    # Dictionary for storing important information regarding the decomposition problem
    info = {}
    info['tsz'] = tsz
    info['nmissing'] = 0  # TODO: revisit once missing value functionality incorporated
    info['nnonnzeros'] = nnonnzeros
    info['nzeros'] = nzeros  # TODO: revisit once missing value functionality incorporated

    # Set up function, gradient, and bounds
    fh, gh, lb = validate_type(type, X)
    info['type'] = type
    info['fh'] = fh
    info['gh'] = gh
    info['lb'] = lb

    # initialize CP-tensor and make a copy to work with so as to have the starting guess
    M0 = initialize_cp(X, R, init=init, random_state=state)
    wghts0 = tl.copy(M0[0])
    fcts0 = []
    for i in range(nd):
        f = tl.copy(M0[1][i])
        fcts0.append(f)
    M = CPTensor((wghts0, fcts0))

    # Lambda weights are assumed to be all ones throughout, check initial guess satisfies assumption
    if not tl.all(M[0]):
        print("Initialization of CP tensor has failed (lambda weight(s) != 1.")
        sys.exit(1)

    # check optimization method
    if validate_opt(opt):
        print("Choose optimization method from: {lbfgsb, sgd}")
        sys.exit(1)
    use_stoc = False
    if opt != 'lbfgsb':
        use_stoc = True
    info['opt'] = opt

    # set up for stochastic optimization (e.g. sgd, adam, adagrad)
    if use_stoc:
        # set up fsampler, gsampler ---> uniform sampling only for now
        # TODO : add stratified, semi-stratified and user-specified sampling options
        if not sampler == "uniform":
            print(
                "Only uniform sampling currently supported for stochastic optimization."
            )
            sys.exit(1)
        fsampler_type = sampler
        gsampler_type = sampler

        # setup fsampler
        f_samp = fsamp
        if f_samp is None:
            upper = np.maximum(math.ceil(tsz / 10), 10 ** 6)
            f_samp = np.minimum(upper, tsz)

        # set up lambda function/function handle for uniform sampling
        fsampler = lambda: tl_sample_uniform(X, f_samp)
        fsampler_str = "{} with {} samples".format(fsampler_type, f_samp)

        # setup gsampler
        g_samp = gsamp
        if g_samp is None:
            upper = np.maximum(1000, math.ceil(10 * tsz / maxiters))
            g_samp = np.minimum(upper, tsz)

        # setup lambda function/function handle for uniform sampling
        gsampler = lambda: tl_sample_uniform(X, g_samp)
        gsampler_str = "{} with {} samples".format(gsampler_type, g_samp)

        # capture the info
        info['fsampler'] = fsampler_str
        info['gsampler'] = gsampler_str
        info['fsamp'] = f_samp
        info['gsamp'] = g_samp

    time_setup0 = time.perf_counter() - start_setup0

    # Welcome message
    if printitn > 0:
        print("GCP-OPT-{} (Generalized CP Tensor Decomposition)".format(opt))
        print("------------------------------------------------")
        print("Tensor size:\t\t\t\t{} ({} total entries)".format(sz, tsz))
        if nmissing > 0:
            print("Missing entries: {} ({})".format(nmissing,
                                                    100 * nmissing / tsz))
        print("Generalized function type:\t{}".format(type))
        print("Objective function:\t\t\t{}".format(
            inspect.getsource(fh).strip()))
        print("Gradient function:\t\t\t{}".format(
            inspect.getsource(gh).strip()))
        print("Lower bound of factors:\t\t{}".format(lb))
        print("Optimization method:\t\t{}".format(opt))
        if use_stoc:
            print("Max iterations (epochs): {}".format(maxiters))
            print("Iterations per epoch: {}".format(epciters))
            print("Learning rate / decay / maxfails: {} {} {}".format(
                rate, decay, maxfails))
            print("Function Sampler: {}".format(fsampler_str))
            print("Gradient Sampler: {}".format(gsampler_str))
        else:
            print("Max iterations:\t\t\t\t{}".format(maxiters))
            print("Projected gradient tol:\t\t{}\n".format(pgtol))

    # Make like a zombie and start decomposing
    Mfin = None
    # L-BFGS-B optimization
    if opt == 'lbfgsb':
        # Timer - Setup (inside optimization)
        start_setup1 = time.perf_counter()

        # set up bounds for l-bfgs-b if lb = 0
        bounds = None
        if lb == 0:
            lb = tl.zeros(tsz)
            ub = math.inf * tl.ones(tsz)
        fcn = lambda x: tl_gcp_fg(vec2factors(x, sz, R, X_context), X, fh, gh)
        m = factors2vec(M[1])

        # capture params for l-bfgs-b
        lbfgsb_params = {}
        lbfgsb_params['x0'] = factors2vec(M0.factors)
        lbfgsb_params['printEvery'] = printitn
        lbfgsb_params['maxIts'] = maxiters
        lbfgsb_params['maxTotalIts'] = maxiters * 10
        lbfgsb_params['factr'] = factr
        lbfgsb_params['pgtol'] = pgtol

        time_setup1 = time.perf_counter() - start_setup1

        if printitn > 0:
            print("Begin main loop")

        # Timer - Main operation
        start_main = time.perf_counter()
        x, f, info_dict = fmin_l_bfgs_b(fcn, m, approx_grad=False, bounds=None, \
                                        pgtol=pgtol, factr=factr, maxiter=maxiters)
        time_main = time.perf_counter() - start_main

        # capture info
        info['fcn'] = fcn
        info['lbfgsbopts'] = lbfgsb_params
        info['lbfgsbout'] = info_dict
        info['finalf'] = f

        if printitn > 0:
            print("\nFinal objective: {}".format(f))
            print("Setup time: {}".format(time_setup0 + time_setup1))
            print("Main loop time: {}".format(time_main))
            print("Outer iterations:"
                  )  # TODO: access this value (see manpage for fmin_l_bfgs_b)
            print("Total iterations: {}".format(info_dict['nit']))
            print("L-BFGS-B exit message: {} ({})".format(
                info_dict['task'], info_dict['warnflag']))
        Mfin = vec2factors(x, sz, R, X_context)

    # Stochastic optimization
    else:
        # Timer - Setup (inside optimization)
        start_setup1 = time.perf_counter()
        if opt == "adam" or opt == "adagrad":
            print("{} not currently supported".format(opt))
            sys.exit(1)
        # prepare for sgd
        # initialize moments
        m = []
        v = []

        # Extract samples for estimating function value (i.e. call fsampler), these never change
        fsubs, fvals, fwgts = fsampler()

        # Compute initial estimated function value
        fest = tl_gcp_fg_est(M, fh, gh, fsubs, fvals, fwgts, True, False,
                             False, False)

        # Set up loop variables
        nfails = 0
        titers = 0

        M_weights = tl.copy(M[0])
        M_factors = []
        for k in range(nd):
            M_factors.append(tl.copy(M[1][k]))
        Msave = CPTensor(
            (M_weights, M_factors))  # save a copy of the initial model
        msave = m
        vsave = v
        fest_prev = fest[0]

        # Tracing the progress in function value by epoch
        fest_trace = tl.zeros(maxiters + 1)
        step_trace = tl.zeros(maxiters + 1)
        time_trace = tl.zeros(maxiters + 1)
        fest_trace[0] = fest[0]

        # Print status
        if printitn > 0:
            print("Begin main loop")
            print("Initial f-est: {}".format(fest[0]))

        time_setup1 = time.perf_counter() - start_setup1
        start_main = time.perf_counter()
        time_trace[0] = time.perf_counter() - start_setup0

        # Main loop - outer iteration
        for nepoch in range(maxiters):
            step = (decay**nfails) * rate
            # Main loop - inner iteration
            for iter in range(epciters):
                # Tracking iterations
                titers = titers + 1

                # Select subset for stochastic gradient (i.e. call gsampler)
                gsubs, gvals, gwts = gsampler()

                # Compute gradients for each mode
                Gest = tl_gcp_fg_est(M, fh, gh, gsubs, gvals, gwts, False,
                                     True, False, False)

                # Check for inf gradient
                for g in Gest[0]:
                    g_max = tl.max(g)
                    g_min = tl.min(g)
                    if math.isinf(g_max) or math.isinf(g_min):
                        print(
                            "Infinite gradient encountered! (epoch = {}, iter = {})"
                            .format(nepoch, iter))

                # TODO : add functionality for ADAM and ADAGRAD optimization
                # Take gradient step
                for k in range(nd):
                    M.factors[k] = M.factors[k] - step * Gest[0][k]

            # Estimate objective (i.e. call tl_gcp_fg_est)
            fest = tl_gcp_fg_est(M, fh, gh, fsubs, fvals, fwgts, True, False,
                                 False, False)

            # Save trace (fest & step)
            fest_trace[nepoch + 1] = fest[0]
            step_trace[nepoch + 1] = step

            # Check convergence condition
            failed_epoch = False
            if fest[0] > fest_prev:
                failed_epoch = True
            if failed_epoch:
                nfails += 1
            festtol_test = False
            if fest[0] < festtol:
                festtol_test = True

            # Reporting
            if printitn > 0 and (nepoch % printitn == 0 or failed_epoch
                                 or festtol_test):
                print("Epoch {}: f-est = {}, step = {}".format(
                    nepoch, fest[0], step),
                      end='')
                if failed_epoch:
                    print(
                        ", nfails = {} (resetting to solution from last epoch)"
                        .format(nfails))
                print("")

            # Rectify failed epoch or save current solution
            if failed_epoch:
                M = Msave
                m = msave
                v = vsave
                fest[0] = fest_prev
                titers = titers - epciters
            else:
                Msave = CPTensor((tl.copy(M.weights), tl.copy(M.factors)))
                msave = m
                vsave = v
                fest_prev = fest[0]

            time_trace[nepoch] = time.perf_counter() - start_setup0

            if (nfails > maxfails) or festtol_test:
                break
        Mfin = M
        time_main = time.perf_counter() - start_main

        # capture info
        info['fest_trace'] = fest_trace
        info['step_trace'] = step_trace
        info['time_trace'] = time_trace
        info['nepoch'] = nepoch

        # Report end of main loop
        if printitn > 0:
            print("End Main Loop")
            print("")
            print("Final f-east: {}".format(fest[0]))
            print("Setup time: {0:0.6f}".format(time_setup0 + time_setup1))
            print("Main loop time: {0:0.6f}".format(time_main))
            print("Total iterations: {}".format(nepoch * epciters))
    # Wrap up / capture remaining info
    info['mainTime'] = time_main
    info['setupTime0'] = time_setup0
    info['setupTime1'] = time_setup1
    info['setupTime'] = time_setup0 + time_setup1

    return Mfin
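An illustrative call (assuming the helpers gcp relies on, such as validate_type, initialize_cp, vec2factors and factors2vec, are available in the same module, and that vec2factors returns a CPTensor as the code above suggests):

import numpy as np
import tensorly as tl

X = tl.tensor(np.random.rand(10, 12, 14))
M = gcp(X, 3, type='normal', opt='lbfgsb', maxiters=200, printitn=0, state=0)
X_hat = tl.cp_to_tensor(M)                  # reconstruct the rank-3 model
rel_err = tl.norm(X - X_hat) / tl.norm(X)   # relative reconstruction error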