Code Example #1
    def __init__(self, root, type='train', transform=None):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.type = type
        all_train_image_list = misc.load_pickle(
            os.path.join(self.root, 'train_img_list.pkl'))
        all_test_image_list = misc.load_pickle(
            os.path.join(self.root, 'val_img_list.pkl'))
        self.train_image_list = []
        self.train_labels = []
        self.val_image_list = []
        self.val_labels = []
        self.test_image_list = []
        self.test_labels = []
        # Carve a held-out validation split of 50 images per class off the
        # official train list; the official val list serves as the test split.
        for i in range(1000):
            self.train_image_list += all_train_image_list[i][:-50]
            self.train_labels += [i] * len(all_train_image_list[i][:-50])
            self.val_image_list += all_train_image_list[i][-50:]
            self.val_labels += [i] * 50
            self.test_image_list += all_test_image_list[i]
            self.test_labels += [i] * 50

        if self.type == 'train':
            self.data = self.train_image_list
            self.labels = self.train_labels
        elif self.type == 'val':
            self.data = self.val_image_list
            self.labels = self.val_labels
        elif self.type == 'train+val':
            self.data = self.train_image_list + self.val_image_list
            self.labels = self.train_labels + self.val_labels
        elif self.type == 'test':
            self.data = self.test_image_list
            self.labels = self.test_labels
        else:
            raise ValueError('unknown split type: %s' % self.type)
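The excerpt shows only the constructor; a PyTorch-style dataset would also need __len__ and __getitem__. A minimal sketch of what those could look like here (PIL-based loading and the exact return format are assumptions, not shown in the source):

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Assumes `from PIL import Image` at module top and that self.data
        # holds image paths; decoding details are project-specific.
        img = Image.open(self.data[index]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, self.labels[index]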
Code Example #2
def get_film_reviews():
    """Gets the film reviews, building them if they are not saved as a pickle file.
    @returns {List} reviews"""
    if os.path.isfile(os.path.join(os.environ['PICKLE_DIRECTORY'], 'movie_reviews.pkl')):
        print("Movie Reviews found, using pickle file.")
        return load_pickle('movie_reviews.pkl')
    else:
        print("Building movie reviews.")
        return build_film_reviews()
Code Example #3
def get_classifier():
    """Gets the film classifier - loading it from memory if
    stored as a pickle file, else creating one if not stored.
    @returns {Classifier} Film Classifier"""
    if os.path.isfile(os.path.join(os.environ['PICKLE_DIRECTORY'], 'filmClassifier.pkl')):
        print("Classifier found, using pickle file")
        return load_pickle('filmClassifier.pkl')
    else:
        print("Classifier not found, creating classifier")
        return build_classifier()
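Both cache lookups above assume the build step persists its result under PICKLE_DIRECTORY so later runs hit the pickle. A minimal sketch of the save-side helper this pattern implies (save_pickle is an assumed name; the source does not show it):

import os
import pickle

def save_pickle(obj, filename):
    # Write obj under PICKLE_DIRECTORY so the isfile() checks above succeed next time.
    path = os.path.join(os.environ['PICKLE_DIRECTORY'], filename)
    with open(path, 'wb') as f:
        pickle.dump(obj, f)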
Code Example #4
File: dataset.py  Project: frankwang345/cdrp-detect
    def __init__(self, images_list_path, start_class, end_class, num_per_class, random_order=False, transform=None):
        self.images_list = load_pickle(images_list_path)
        self.transform = transform
        self.targets = []
        self.images = []

        classes = list(range(1000))[start_class:end_class]
        for i in classes:
            self.targets.extend([i] * num_per_class)
            imgs = self.images_list[i]
            # Shuffle first when a random sample is requested; either way,
            # keep the first num_per_class images of the class.
            if random_order:
                random.shuffle(imgs)
            self.images.extend(imgs[:num_per_class])
Code Example #5
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
import numpy as np
import misc

print("Loading data...")
training_data = misc.load_pickle('train')

print(training_data.shape)

lr = linear_model.LinearRegression()

X = training_data[:, 1:]  # feature columns
Y = training_data[:, 0]   # the target is stored in the first column

print("Fitting...")

lr.fit(X, Y)
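cross_val_score is imported above but never used in the excerpt; presumably the original went on to evaluate the fit. A short sketch of how it could be applied here:

print("Cross-validating...")
scores = cross_val_score(lr, X, Y, cv=5)  # 5-fold CV, default R^2 scoring
print("R^2: %.3f (+/- %.3f)" % (scores.mean(), scores.std()))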
Code Example #6
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.train_batch_size,
                                          shuffle=True,
                                          num_workers=2)

testset = datasets.CIFAR10(root='./data/cifar10',
                           type='test',
                           transform=transform_val)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

print('==> Initializing model...')
pruned_cfg = misc.load_pickle(
    'logs/seed-%d/%s-%s/channel-%d-sparsity-%.2f/pruned_cfg-%.2f.pkl' %
    (args.seed, args.dataset, args.arch, args.expanded_inchannel,
     args.sparsity_level, args.pruned_ratio))

model = models.__dict__[args.arch](args.num_classes, args.expanded_inchannel,
                                   pruned_cfg)

model = model.to(args.device)

optimizer = torch.optim.SGD(model.parameters(),
                            lr=args.lr,
                            momentum=args.mm,
                            weight_decay=args.wd)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[int(args.epochs * 0.5),
                int(args.epochs * 0.75)])  # gamma defaults to 0.1
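The excerpt ends once the optimizer and scheduler are set up; a minimal sketch of the epoch loop this setup implies (train_epoch and evaluate are hypothetical helpers, not part of the source):

for epoch in range(args.epochs):
    train_epoch(model, trainloader, optimizer)  # hypothetical helper
    evaluate(model, testloader)                 # hypothetical helper
    scheduler.step()  # decays the LR at 50% and 75% of training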
Code Example #7
        return loss


if __name__ == "__main__":
    expanded_inchannel = 80
    pruned_ratio = 0.7
    sparsity_level = 0.5
    arch = 'resnet50'
    multiplier = 1.0
    logdir = 'imagenet-%s/channel-%d-pruned-%.2f' % (arch, expanded_inchannel,
                                                     pruned_ratio)
    import misc
    import models
    print("=> Using model {}".format(arch))
    pruned_cfg = misc.load_pickle(
        '/home/victorfang/pruning-from-scratch/script/logs/imagenet-%s/channel-%d-sparsity-%.2f/pruned_cfg-%.2f.pkl'
        % (arch, expanded_inchannel, sparsity_level, pruned_ratio))

    model = models.__dict__[arch](1000, expanded_inchannel, multiplier,
                                  pruned_cfg)
    # checkpoint=torch.load('/home/victorfang/pruning-from-scratch/data/model_saved/imagenet-resnet50-sparsity-0.50/channel-80-pruned-0.70/checkpoint/flop=1262676704,accuracy=0.69436.tar')
    # model.load_state_dict(checkpoint['state_dict'])
    # model=nn.DataParallel(model)
    model.cuda()
    train(net=model,
          net_name=arch,
          exp_name='imagenet-%s-sparsity-%.2f/channel-%d-pruned-%.2f_new' %
          (arch, sparsity_level, expanded_inchannel, pruned_ratio),
          learning_rate=0.1,
          learning_rate_decay_epoch=[2 * i for i in [30, 60, 90]],
          num_epochs=2 * 100)
Code Example #8
parser.add_argument('--seed', default=4699, type=int)

args = parser.parse_args()
args.seed = misc.set_seed(args.seed)

args.device = 'cuda'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

args.eps = 0.001
args.num_classes = 100

args.logdir = 'logs/seed-%d/%s-%s/channel-%d-sparsity-%.2f' % (
    args.seed, args.dataset, args.arch, args.expanded_inchannel,
    args.sparsity_level)

gates_params = misc.load_pickle(os.path.join(args.logdir, 'channel_gates.pkl'))


def calculate_flops(model, input_size=(1, 3, 32, 32)):
    # Attach flop-counting hooks, run one dummy forward pass, and read back
    # the average FLOPs cost.
    model = flop_counter.add_flops_counting_methods(model)
    model.eval().start_flops_count()
    inp = torch.randn(*input_size)
    out = model(inp)
    flops = model.compute_average_flops_cost()
    return flops


print('==> Initializing full model...')
model = models.__dict__[args.arch](args.num_classes)

full_flops = calculate_flops(model)
Code Example #9
args.gpu = 0
args.world_size = 1

if args.distributed:
    args.gpu = args.local_rank % torch.cuda.device_count()
    torch.cuda.set_device(args.gpu)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    args.world_size = torch.distributed.get_world_size()

if args.local_rank == 0:
    misc.prepare_logging(args)

print("=> Using model {}".format(args.arch))
pruned_cfg = misc.load_pickle(
    'logs/imagenet-%s/channel-%d-sparsity-%.2f/pruned_cfg-%.2f.pkl' %
    (args.arch, args.expanded_inchannel, args.sparsity_level,
     args.pruned_ratio))

model = models.__dict__[args.arch](1000, args.expanded_inchannel,
                                   args.multiplier, pruned_cfg)
model = model.cuda()
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if args.label_smooth:

    class CrossEntropyLabelSmooth(nn.Module):
        def __init__(self, num_classes, epsilon):
            super(CrossEntropyLabelSmooth, self).__init__()
            self.num_classes = num_classes
            self.epsilon = epsilon
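The excerpt cuts off before the loss's forward pass. A minimal sketch of the standard label-smoothing forward such a class typically implements (an assumption; the original body is not shown):

        def forward(self, inputs, targets):
            # Mix the one-hot targets with a uniform distribution, then take
            # the cross entropy against the smoothed targets by hand.
            log_probs = torch.nn.functional.log_softmax(inputs, dim=1)
            one_hot = torch.zeros_like(log_probs).scatter_(
                1, targets.unsqueeze(1), 1)
            smoothed = (1 - self.epsilon) * one_hot + \
                self.epsilon / self.num_classes
            return (-smoothed * log_probs).sum(dim=1).mean()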
Code Example #10
import argparse

import cv2
import numpy as np
import tqdm

import misc

imagenet_urls = [
    'http://ml.cs.tsinghua.edu.cn/~chenxi/dataset/val224_compressed.pkl'
]
parser = argparse.ArgumentParser(
    description='Extract the ILSVRC2012 val dataset')
parser.add_argument('--in_file',
                    default='val224_compressed.pkl',
                    help='input file path')
parser.add_argument('--out_root',
                    default='~/public_dataset/pytorch/imagenet-data/',
                    help='output file path')
args = parser.parse_args()

d = misc.load_pickle(args.in_file)
assert len(d['data']) == 50000, len(d['data'])
assert len(d['target']) == 50000, len(d['target'])

data224 = []
data299 = []
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=50000):
    img224 = misc.str2img(img)  # decode the stored string to a 224x224 image array
    img299 = cv2.resize(img224, (299, 299))  # upscaled copy for Inception-style models
    data224.append(img224)
    data299.append(img299)
data_dict224 = dict(data=np.array(data224).transpose(0, 3, 1, 2),
                    target=d['target'])
data_dict299 = dict(data=np.array(data299).transpose(0, 3, 1, 2),
                    target=d['target'])
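The excerpt stops before the repacked dictionaries are written out; a plausible final step, sketched with plain pickle (the output filenames are assumptions):

import os
import pickle

out_root = os.path.expanduser(args.out_root)
with open(os.path.join(out_root, 'val224.pkl'), 'wb') as f:
    pickle.dump(data_dict224, f)
with open(os.path.join(out_root, 'val299.pkl'), 'wb') as f:
    pickle.dump(data_dict299, f)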
Code Example #11
def get_collaborative_recommender():
    """Loads the algorithm's pickle file from memory
    @returns {Algorithm Object} algorithm"""
    return load_pickle('collaborative.pkl')
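Every example above leans on a small load_pickle / misc.load_pickle helper whose implementation the projects do not show here. A minimal sketch of what such a helper typically looks like:

import pickle

def load_pickle(path):
    # Deserialize and return whatever object was stored at `path`.
    with open(path, 'rb') as f:
        return pickle.load(f)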