import time

import numpy as np
import scipy.io as sio

# `rm` (the regression-model module), `obq` (the one-bit quantizer module),
# and the experiment constants `num_lr`, `repeat_num`, `num_iterations`,
# `learning_rates`, and `batch_size` are assumed to be imported/defined
# earlier in this file.


def evaluate_qcssgd(T, Wopt, file_name, H, feedback, beta):
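    """Run SGD on the regression task using the QCS gradient quantizer.

    For every learning rate in `learning_rates`, the model (configured by
    `H`, `feedback`, and `beta`) is trained `repeat_num` times for
    `num_iterations` iterations; the per-iteration loss and squared loss
    are saved to `file_name` as a MATLAB .mat file.
    """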
    model = rm.RegressionModel()
    model.create(T,
                 Wopt,
                 quantizer='qcs',
                 num_levels=1,
                 H=H,
                 feedback=feedback,
                 beta=beta)

    loss = np.zeros((num_lr, repeat_num, num_iterations))
    loss2 = np.zeros((num_lr, repeat_num, num_iterations))
    for n, lr in enumerate(learning_rates):
        start = time.time()
        print('\nLearning rate = ', lr, flush=True)
        for rp in range(repeat_num):
            model.reset()

            info_str = ' '
            for cnt in range(num_iterations):
                # quantize the gradients with the QCS scheme configured above
                gh = model.compute_quantized_gradients(batch_size)
                model.apply_gradients([gh], learning_rate=lr)

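                # evaluate the loss on an evaluation batch and record it
                # (and its square) for this iteration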
                cur_loss = model.loss(batch_size=1024)
                loss[n, rp, cnt] += cur_loss
                loss2[n, rp, cnt] += (cur_loss**2)
                # check for divergence on every iteration; a NaN or exploding
                # loss would otherwise pollute the recorded curves until the
                # next status update
                if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):
                    print(' ' * len(info_str), end='\r', flush=True)
                    print(' Diverged.', end='\r', flush=True)
                    break

                if cnt % 10 == 0:
                    # overwrite the previous status line in place
                    print(' ' * len(info_str), end='\r', flush=True)
                    info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(
                        rp, cnt, cur_loss)
                    print(info_str, end='\r', flush=True)

            print('')

        elapsed = time.time() - start
        print(' elapsed time = %.3f' % elapsed, flush=True)

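    # save the loss statistics and the learning-rate grid in MATLAB format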
    sio.savemat(file_name,
                mdict={
                    'loss': loss,
                    'loss2': loss2,
                    'lr': learning_rates,
                })


def evaluate_onebit(T, Wopt, file_name):
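    """Run SGD on the regression task with one-bit quantized gradients.

    Same experimental loop and output format as `evaluate_qcssgd`, but the
    gradients are quantized by a stateful `obq.onebit_quantizer` instead of
    the model's built-in QCS quantizer.
    """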
    model = rm.RegressionModel(T, Wopt)
    quantizer = obq.onebit_quantizer()

    loss = np.zeros((num_lr, repeat_num, num_iterations))
    loss2 = np.zeros((num_lr, repeat_num, num_iterations))
    for n, lr in enumerate(learning_rates):
        start = time.time()
        print('\nLearning rate = ', lr, flush=True)
        for rp in range(repeat_num):
            # reset model and quantizer state for this repetition
            model.reset()
            quantizer.reset()

            info_str = ' '
            for cnt in range(num_iterations):
                # apply one-bit quantization method to the gradients
                g = model.gradient(batch_size)
                gh = quantizer.quantize(g)
                model.update(gh, learning_rate=lr)

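                # evaluate the loss on an evaluation batch and record it
                # (and its square) for this iteration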
                cur_loss = model.loss(batch_size=1024)
                loss[n, rp, cnt] += cur_loss
                loss2[n, rp, cnt] += (cur_loss**2)
                # check for divergence on every iteration; a NaN or exploding
                # loss would otherwise pollute the recorded curves until the
                # next status update
                if (not np.isfinite(cur_loss)) or (cur_loss > 1e10):
                    print(' ' * len(info_str), end='\r', flush=True)
                    print(' Diverged.', end='\r', flush=True)
                    break

                if cnt % 10 == 0:
                    # overwrite the previous status line in place
                    print(' ' * len(info_str), end='\r', flush=True)
                    info_str = ' exp: {0: 2d}, iteration: {1: 4d}, loss={2:.5f}'.format(
                        rp, cnt, cur_loss)
                    print(info_str, end='\r', flush=True)

            print('')

        elapsed = time.time() - start
        print(' elapsed time = %.3f' % elapsed, flush=True)

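    # save the loss statistics and the learning-rate grid in MATLAB format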
    sio.savemat(file_name,
                mdict={
                    'loss': loss,
                    'loss2': loss2,
                    'lr': learning_rates,
                })