Example #1
import numpy as np
from scipy import io

# FourToOneImage, NodeManager, mil, and MAT_PATH come from the surrounding
# DeSTIN project.
def main():
    """
    Run DeSTIN on a dataset and save the resulting belief states
    as feature vectors.
    """
    params = FourToOneImage(channels=3, global_pooling=True, split_quads=False)
    nm = NodeManager(params)

    data = mil.load_cifar()
    nm.init_whitening(mn=data['patch_mean'], st=data['patch_std'], tr=data['whiten_mat'])

    # Train the hierarchy on the training images.
    nm.process_images(data['train_data'], 15000, True, False,
                      data['train_labels'], 'for_viewing_centroids_heat_map')

    # Record beliefs for the training set; labels are not used here,
    # so an array of zeros is passed instead.
    features = nm.process_images(data['train_data'], [], False, True,
                                 np.zeros(data['train_labels'].shape))
    vts = {}
    vts['features'] = features
    vts['labels'] = data['train_labels']

    # Record beliefs for the test set.
    features = nm.process_images(data['test_data'], [], False, True,
                                 np.zeros(data['test_labels'].shape))
    vts['test_features'] = features
    vts['test_labels'] = data['test_labels']
    io.savemat(MAT_PATH, vts)
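
The saved MAT file can then be handed to any off-the-shelf classifier. A minimal sketch, assuming scikit-learn is available and using a hypothetical stand-in path for MAT_PATH:

from scipy import io
from sklearn.linear_model import LogisticRegression

# 'features.mat' is a hypothetical stand-in for MAT_PATH above.
vts = io.loadmat('features.mat')
clf = LogisticRegression(max_iter=1000)
clf.fit(vts['features'], vts['labels'].ravel())
print('test accuracy:', clf.score(vts['test_features'], vts['test_labels'].ravel()))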
Example #2
from typing import Tuple

# load_cifar, take_first, make_chebyshev_dataset, make_linear_dataset, and
# TensorDataset are defined elsewhere in the surrounding project.
def load_dataset(dataset_name: str, loss: str) -> Tuple[TensorDataset, TensorDataset]:
    # Truncated CIFAR-10 variants: dataset name -> number of training examples.
    cifar_subsets = {"cifar10-1k": 1000, "cifar10-2k": 2000, "cifar10-5k": 5000,
                     "cifar10-10k": 10000, "cifar10-20k": 20000}
    if dataset_name == "cifar10":
        return load_cifar(loss)
    elif dataset_name in cifar_subsets:
        train, test = load_cifar(loss)
        return take_first(train, cifar_subsets[dataset_name]), test
    elif dataset_name == "chebyshev-5-20":
        return make_chebyshev_dataset(k=5, n=20)
    elif dataset_name == "chebyshev-4-20":
        return make_chebyshev_dataset(k=4, n=20)
    elif dataset_name == "chebyshev-3-20":
        return make_chebyshev_dataset(k=3, n=20)
    elif dataset_name == "linear-50-50":
        return make_linear_dataset(n=50, d=50)
    raise ValueError(f"unknown dataset: {dataset_name}")
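
Usage is a single call. A minimal sketch (the loss value "ce" is an assumption; the accepted loss strings are not shown above):

train, test = load_dataset("cifar10-5k", loss="ce")
print(len(train), len(test))  # 5000 training examples, full test set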
Example #3
                        help='Evaluation dataset')
    opt = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = load_model(device, opt.model)

    if model is not None:
        model_sp = copy.deepcopy(model)

        if opt.eval == 'cifar':
            # Load the CIFAR data.
            cifar_testloader = load_cifar('test')
            cifar_trainloader = load_cifar('train')

            # Count the original model's parameters.
            org_params = sum(np.prod(p.size()) for p in model.parameters())

            print('\nEvaluating original model')
            org_param_str = "\nNumber of Original Parameters: %.1fM" % (
                org_params / 1e6)
            print(org_param_str)

            # Evaluate on the CIFAR test set.
            acc, avg_inf_time = eval_cifar(model, cifar_testloader, device)
            print(f'\nAccuracy - {acc}')
            print(f'Average Inference Time - {avg_inf_time}')
            print('\n*********************************************\n')
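
eval_cifar itself is not shown above. A minimal sketch of what such a helper might look like, assuming a standard PyTorch DataLoader that yields (inputs, labels) batches; the body is an assumption, only the call signature and the (accuracy, average inference time) return convention are taken from the snippet:

import time

import torch


def eval_cifar(model, testloader, device):
    # Sketch only: accuracy plus mean per-batch inference time.
    model.eval()
    correct, total, times = 0, 0, []
    with torch.no_grad():
        for inputs, labels in testloader:
            inputs, labels = inputs.to(device), labels.to(device)
            start = time.time()
            outputs = model(inputs)
            times.append(time.time() - start)
            preds = outputs.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / total, sum(times) / len(times)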
Example #4
import argparse
import sys

import tensorflow as tf

sys.path.append('/gpfs/projects/nct00/nct00002/cifar-utils')
from cifar import load_cifar

parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=2048)
parser.add_argument('--n_gpus', type=int, default=1)

args = parser.parse_args()
batch_size = args.batch_size
epochs = args.epochs
n_gpus = args.n_gpus

train_ds, test_ds = load_cifar(batch_size)

# List all physical GPU devices. Physical device names look like
# '/physical_device:GPU:0', so splitting on "e:" keeps the 'GPU:0' part.
device_type = 'GPU'
devices = tf.config.experimental.list_physical_devices(device_type)
devices_names = [d.name.split("e:")[1] for d in devices]

strategy = tf.distribute.MirroredStrategy(devices=devices_names[:n_gpus])
with strategy.scope():
    model = tf.keras.applications.resnet_v2.ResNet50V2(include_top=True,
                                                       weights=None,
                                                       input_shape=(128, 128,
                                                                    3),
                                                       classes=10)

    opt = tf.keras.optimizers.SGD(0.01 * n_gpus)
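
The snippet ends before training starts. A plausible continuation, not from the source, would compile inside the strategy scope and then fit on the already-batched tf.data datasets:

    # Still inside strategy.scope(): compile the replicated model.
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

# train_ds/test_ds are assumed to be batched tf.data.Dataset objects.
model.fit(train_ds, epochs=epochs, validation_data=test_ds)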
Example #5
        'profile': params['profile'],
        'narray': params['narray'],
        'sigma': params['sigma'],
        'rpr_alloc': params['rpr_alloc'],
        'thresh': params['thresh']
    }

    # Tag every result entry with the shared parameter values.
    for r in result:
        r.update(update)
        return_list.append(r)


####

# model, x, y = load_resnet(num_example=1, array_params=array_params)
model, x, y = load_cifar(num_example=1, array_params=array_params)

####

start = time.time()

# Profile the ADC and cache it to disk, or reuse a previously saved profile.
load_profile_adc = False

if not load_profile_adc:
    profile = model.profile_adc(x=x)
    np.save('profile_adc', profile)
else:
    profile = np.load('profile_adc.npy', allow_pickle=True).item()
    model.set_profile_adc(profile)

##########################
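
The caching branch above relies on NumPy's pickle support: np.save can serialize a plain Python dict as a zero-dimensional object array, and .item() unwraps it on load. A self-contained illustration (the profile contents here are hypothetical):

import numpy as np

profile = {'adc': np.arange(8), 'scale': 0.5}  # hypothetical profile contents
np.save('profile_adc', profile)                # writes profile_adc.npy

loaded = np.load('profile_adc.npy', allow_pickle=True).item()
assert loaded['scale'] == 0.5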