Example #1
def count_images(blog, directory_seq, basedir, seconddir="logs"):
    targetpath = '%s/%s' % (basedir, seconddir)
    checkdir(targetpath)
    filename = '%s/count_images_%02d.txt' % (targetpath, directory_seq)
    f = open(filename, 'a')
    f.write(str(len(blog["images"])) + '\n')
    f.close()
def count_images(blog, directory_seq, basedir, seconddir = "logs"):
    targetpath = '%s/%s' % (basedir, seconddir)
    checkdir(targetpath)
    filename = '%s/count_images_%02d.txt' % (targetpath, directory_seq)
    f = open(filename, 'a')
    f.write(str(len(blog["images"]))+'\n')
    f.close()
def error_log_url(blog_id, log_no, date, directory_seq, basedir, seconddir = "logs"):
    targetpath = '%s/%s' % (basedir, seconddir)
    checkdir(targetpath)
    filename = '%s/error_url_comment_%s-%02d-%02d.txt' % (targetpath, int(date[0:4]), int(date[5:7]), int(date[8:10]))
    f   = open(filename, 'a')
    url = '%s, http://m.blog.naver.com/%s/%s, access denied\n' % (directory_seq, blog_id, log_no)
    f.write(url)
    f.close()
def make_json(blog, blog_id, log_no, date, directory_seq, basedir, seconddir = "comments"):
    PATH = '%s/%02d/%02d' % (int(date[0:4]), int(date[5:7]), int(date[8:10]))
    targetpath = '%s/%s/%02d/%s' % (basedir, seconddir, directory_seq, PATH)
    checkdir(targetpath)
    filename = '%s/%s.json' % (targetpath, log_no)
    f        = open(filename, 'w')
    jsonstr  = json.dumps(blog, sort_keys=True, indent=4, encoding='utf-8')
    f.write(jsonstr)
    f.close()
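Every example in this listing relies on a checkdir (or utils.checkdir) helper that is not shown. In most snippets it takes a single path and is expected to create the directory if it does not already exist; one snippet below calls a two-argument utils.checkdir(inputdir, outputdir) variant that returns a boolean instead. A minimal sketch of the single-argument form, offered as an assumption rather than the original implementation:

import os

def checkdir(path):
    # Assumed behaviour: create the directory (and any missing parents) if it
    # does not already exist, and return the path so calls such as
    # savedir = checkdir(os.path.join(out_dir, str(stat_id))) keep working.
    if not os.path.isdir(path):
        os.makedirs(path)
    return path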
 def Paser_data(self, prune_classes, fine_tune_classes, prune_rate,
                file_path):
     prune_label = np.array([])
     prune_data = np.array([])
     prune_file_name = np.array([])
     fine_tune_label = np.array([])
     fine_tune_data = np.array([])
     fine_tune_file_name = np.array([])
     prune_iter = int(1 / prune_rate)
     with open(file_path, 'rb') as f:
         data = pickle.load(f, encoding='latin1')
     size = len(data['labels'])
     iter_ = 0
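     # Split the loaded batch: samples of prune_classes are kept only every
     # prune_iter-th time (roughly a prune_rate fraction) for the prune set;
     # the skipped samples of classes that also appear in fine_tune_classes,
     # plus all samples of fine-tune-only classes, go to the fine-tune set.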
     for i in range(size):
         if data['labels'][i] in prune_classes and iter_ % prune_iter == 0:
             iter_ += 1
             prune_label = np.append(prune_label, data['labels'][i])
             prune_data = np.append(prune_data, data['data'][i])
             prune_file_name = np.append(prune_file_name,
                                         data['filenames'][i])
         if data['labels'][i] in fine_tune_classes and data['labels'][i] in prune_classes\
                 and i % prune_iter != 0:
             iter_ += 1
             fine_tune_label = np.append(fine_tune_label, data['labels'][i])
             fine_tune_data = np.append(fine_tune_data, data['data'][i])
             fine_tune_file_name = np.append(fine_tune_file_name,
                                             data['filenames'][i])
         if data['labels'][i] in fine_tune_classes and data['labels'][
                 i] not in prune_classes:
             fine_tune_label = np.append(fine_tune_label, data['labels'][i])
             fine_tune_data = np.append(fine_tune_data, data['data'][i])
             fine_tune_file_name = np.append(fine_tune_file_name,
                                             data['filenames'][i])
     new_dataset = {}
     new_dataset['labels'] = prune_label
     new_dataset['data'] = prune_data.reshape((
         -1,
         data['data'][0].shape[0],
     ))
     new_dataset['filenames'] = prune_file_name
     utils.checkdir(os.path.join(self.root, 'parsed_data', 'prune_data'))
     with open(os.path.join(self.root, 'parsed_data', 'prune_data'),
               'wb') as f:
         pickle.dump(new_dataset, f, 0)
     new_dataset = {}
     new_dataset['labels'] = fine_tune_label
     new_dataset['data'] = fine_tune_data.reshape((
         -1,
         data['data'][0].shape[0],
     ))
     new_dataset['filenames'] = fine_tune_file_name
     utils.checkdir(os.path.join(self.root, 'parsed_data',
                                 'fine_tune_data'))
     with open(os.path.join(self.root, 'parsed_data', 'fine_tune_data'),
               'wb') as f:
         pickle.dump(new_dataset, f, 0)
def write_json(static_blog, date, seconddir='statistics'):

	basedir ='/home/web/public_html/data/naver-blog'
	PATH = '%s-%02d-%02d' % (int(date[0:4]), int(date[5:7]), int(date[8:10]))
	targetpath = '%s/%s' % (basedir, seconddir)
	checkdir(targetpath)
	filename = '%s/%s.json' % (targetpath, PATH)
	f        = open(filename, 'w')
	jsonstr  = json.dumps(static_blog, sort_keys=True, indent=4, encoding='utf-8')
	f.write(jsonstr)
	f.close()
Example #7
def crawl_blog_posts_for_query_per_date(query, date, db_pool=None):
    def get_keys_from_page(query, date, pagenum):
        root = html.parse(listurl % (query, date, date, pagenum))
        items = root.xpath('//ul[@class="list_type_1 search_list"]')[0]

        blog_ids = items.xpath('./input[@name="blogId"]/@value')
        log_nos = items.xpath('./input[@name="logNo"]/@value')
        times = [utils.format_datetime(utils.parse_datetime(time))\
            for time in items.xpath('./li/div[@class="list_data"]/span[@class="date"]/text()')]

        return {(b, l): t for b, l, t in zip(blog_ids, log_nos, times)}

    if db_pool is None:
        # make directories
        subdir = '/'.join([DATADIR, query, date.split('-')[0]])
        utils.checkdir(subdir)
        if REMOTE:
            rsubdir = '/'.join([REMOTE['dir'], query, date.split('-')[0]])
            utils.rcheckdir(sftp, rsubdir)

    # check number of items
    try:
        nitems = get_nitems_for_query(query, date, date)
    except IndexError:
        print query, date, 'None'
        return

    # crawl items
    for pagenum in range(int(nitems / 10.)):
        keys = get_keys_from_page(query, date, pagenum + 1)
        tags = get_tags_for_items(keys)
        for (blog_id, log_no), written_time in keys.items():
            try:
                info = crawl_blog_post(blog_id,
                                       log_no,
                                       tags,
                                       written_time,
                                       verbose=False)
                if db_pool is None:
                    localpath = '%s/%s.json' % (subdir, log_no)
                    utils.write_json(info, localpath)
                    if REMOTE:
                        remotepath = '%s/%s.json' % (rsubdir, log_no)
                        sftp.put(localpath, remotepath)
                else:
                    db_pool.insert_blog_to_db(info)
            except IndexError:
                print Exception(\
                    'Crawl failed for http://blog.naver.com/%s/%s' % (blog_id, log_no))

            time.sleep(SLEEP)

    overwrite_queries(query, date)
    print query, date, nitems
Example #8
    def save_checkpoint(self, val_loss, model, cpath, spath):
        '''Saves model when validation loss decreases.'''
        msg = ""
        if self.verbose:
            msg = f'saved (VLoss {self.val_loss_min:.4f}->{val_loss:.4f})'
            # print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')

        # torch.save(model.state_dict(), 'checkpoint.pt')
        utils.checkdir(cpath)
        torch.save(model, spath)

        self.val_loss_min = val_loss
        return msg
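The method above appears to belong to an early-stopping helper (it tracks self.val_loss_min and self.verbose). A hypothetical call site, in which every name outside the snippet (EarlyStopping, evaluate, the loaders, the paths) is assumed for illustration only:

stopper = EarlyStopping(verbose=True)            # assumed wrapper class
for epoch in range(num_epochs):                  # num_epochs assumed
    train_one_epoch(model, train_loader)         # assumed training routine
    val_loss = evaluate(model, valid_loader)     # assumed validation routine
    if val_loss < stopper.val_loss_min:          # save only on improvement
        msg = stopper.save_checkpoint(val_loss, model,
                                      cpath="saves/", spath="saves/best.pth.tar")
        print(msg)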
def crawl_blog_posts_for_query_per_date(query, date, db_pool=None):

    def get_keys_from_page(query, date, pagenum):
        root = html.parse(listurl % (query, date, date, pagenum))
        items = root.xpath('//ul[@class="list_type_1 search_list"]')[0]

        blog_ids = items.xpath('./input[@name="blogId"]/@value')
        log_nos = items.xpath('./input[@name="logNo"]/@value')
        times = [utils.format_datetime(utils.parse_datetime(time))\
            for time in items.xpath('./li/div[@class="list_data"]/span[@class="date"]/text()')]

        return {(b, l): t for b, l, t in zip(blog_ids, log_nos, times)}

    if db_pool is None:
        # make directories
        subdir = '/'.join([DATADIR, query, date.split('-')[0]])
        utils.checkdir(subdir)
        if REMOTE:
            rsubdir = '/'.join([REMOTE['dir'], query, date.split('-')[0]])
            utils.rcheckdir(sftp, rsubdir)

    # check number of items
    try:
        nitems = get_nitems_for_query(query, date, date)
    except IndexError:
        print query, date, 'None'
        return

    # crawl items
    for pagenum in range(int(nitems/10.)):
        keys = get_keys_from_page(query, date, pagenum + 1)
        tags = get_tags_for_items(keys)
        for (blog_id, log_no), written_time in keys.items():
            try:
                info = crawl_blog_post(blog_id, log_no, tags, written_time, verbose=False)
                if db_pool is None:
                    localpath = '%s/%s.json' % (subdir, log_no)
                    utils.write_json(info, localpath)
                    if REMOTE:
                        remotepath = '%s/%s.json' % (rsubdir, log_no)
                        sftp.put(localpath, remotepath)
                else:
                    db_pool.insert_blog_to_db(info)
            except IndexError:
                print Exception(\
                    'Crawl failed for http://blog.naver.com/%s/%s' % (blog_id, log_no))

            time.sleep(SLEEP)

    overwrite_queries(query, date)
    print query, date, nitems
Example #10
def make_json(blog,
              blog_id,
              log_no,
              date,
              directory_seq,
              basedir,
              seconddir="comments"):
    PATH = '%s/%02d/%02d' % (int(date[0:4]), int(date[5:7]), int(date[8:10]))
    targetpath = '%s/%s/%02d/%s' % (basedir, seconddir, directory_seq, PATH)
    checkdir(targetpath)
    filename = '%s/%s.json' % (targetpath, log_no)
    f = open(filename, 'w')
    jsonstr = json.dumps(blog, sort_keys=True, indent=4, encoding='utf-8')
    f.write(jsonstr)
    f.close()
Example #11
def error_log_url(blog_id,
                  log_no,
                  date,
                  directory_seq,
                  basedir,
                  seconddir="logs"):
    targetpath = '%s/%s' % (basedir, seconddir)
    checkdir(targetpath)
    filename = '%s/error_url_comment_%s-%02d-%02d.txt' % (
        targetpath, int(date[0:4]), int(date[5:7]), int(date[8:10]))
    f = open(filename, 'a')
    url = '%s, http://m.blog.naver.com/%s/%s, access denied\n' % (
        directory_seq, blog_id, log_no)
    f.write(url)
    f.close()
Example #12
def train(model, pad_index, train_iter, valid_iter, args, num_epochs=10, lr=0.0003, print_every=100):
    """Train a model on IWSLT"""
    
    if USE_CUDA:
        model.cuda()

    # optionally add label smoothing; see the Annotated Transformer
    criterion = nn.NLLLoss(reduction="sum", ignore_index=pad_index)
    optim = torch.optim.Adam(model.parameters(), lr=lr)

    comp1 = utils.print_nonzeros(model)
    
    dev_perplexities = []
    tr_perplexities = []
    for epoch in range(num_epochs):
      
        print("Epoch", epoch)
        model.train()
        train_perplexity = run_epoch((rebatch(pad_index, b) for b in train_iter), 
                                     model,
                                     SimpleLossCompute(model.generator, criterion, optim),
                                     print_every=print_every)
        tr_perplexities.append(train_perplexity)
        model.eval()
        with torch.no_grad():
            print_examples((rebatch(pad_index, x) for x in valid_iter), 
                           model, n=3, src_vocab=SRC.vocab, trg_vocab=TRG.vocab)        

            dev_perplexity = run_epoch((rebatch(pad_index, b) for b in valid_iter), 
                                       model, 
                                       SimpleLossCompute(model.generator, criterion, None))
            print("Validation perplexity: %f" % dev_perplexity)
            dev_perplexities.append(dev_perplexity)
    
    plt.plot(np.arange(0,len(tr_perplexities)), tr_perplexities, c="blue", label="train perplexity") 
    plt.plot(np.arange(0,len(tr_perplexities)), dev_perplexities, c="red", label="validation perplexity") 
    plt.title(f"Perplexity through epochs (IWSLT, transformer)") 
    plt.xlabel("Iterations") 
    plt.ylabel("Perplexity") 
    plt.legend() 
    plt.grid(color="gray") 
    utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
    plt.savefig(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_Perplexity_{comp1}.png", dpi=1200) 
    plt.close()

    return sum(dev_perplexities)/len(dev_perplexities)
Example #13
    def run(self):

        try:
            utils.checkdir(self.SaveDir)

            # create a temporary folder
            self.tmp_folder = tempfile.TemporaryDirectory(suffix = ".TMP",
                                                          prefix="_BrukerGUI_",
                                                          dir = self.SaveDir)
            if self.trigger == "all":
                self._SaveAllChecked()

            elif self.trigger == "single":
                self._SaveSingle()

        except CANCELThread: 
            self.quit()
def extractJsFromHtmlDir(inputdir, outputdir):
	if not utils.checkdir(inputdir, outputdir):
		return
	infiles = utils.lsresult(inputdir)
	for infile in infiles:
		outfile = outputdir + '/' + infile
		infile = inputdir + infile
		if utils.checkfile(infile, outfile):
			extractJsFromHtml(infile, outfile)
Example #15
def main():
    expdir = os.path.abspath(os.path.expanduser('exp'))
    checkdir(expdir)

    config = Configures(configFile)
    os.environ["CUDA_VISIBLE_DEVICES"] = config('val', 'WORKER')
    max_steps = config('data', 'SIZE')

    net = ConvNet(config, task='val')
    net, starting_epoch = init_net(net, config)

    voc_loader = VOCloader.Loader(configure=config, task='val')
    train_loader = voc_loader()
    train_iterator = tqdm(train_loader, total=max_steps)

    count = 0
    net.eval()

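    # For every batch, save the input image, the ground-truth label map, and
    # the network output as .npy files in a numbered subdirectory of exp/.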
    for x, y, y_cls in train_iterator:

        x, y, y_cls = Variable(x).cuda(), Variable(y).cuda(), Variable(
            y_cls).cuda()

        out_cls, out = net(x, func='all')

        count += 1

        outdir = os.path.join(expdir, str(count).zfill(6))
        checkdir(outdir)
        name_x = os.path.join(outdir, 'X.npy')
        name_y = os.path.join(outdir, 'y.npy')
        name_out = os.path.join(outdir, 'out.npy')

        xs = x.data[0].cpu().transpose(0, 2).transpose(0, 1).numpy()
        np.save(name_x, xs)
        ys = y.data[0].cpu().numpy()
        np.save(name_y, ys)
        outs = out.data[0].cpu().numpy()
        np.save(name_out, outs)
Example #16
def ploter(obs_domain, qc_domain, fill_domain, stat_id):
    '''
        Plot time-series curves of the three data types (obs, qc, fill) over the selected time range.
    '''
    start = T_DOMAIN.get('start', None)
    end = T_DOMAIN.get('end', None)
    out_dir = PATHS.get('save_dir')
    savedir = checkdir(os.path.join(out_dir, str(stat_id)))

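    # One figure per species: three stacked panels (obs, qc, fill), each showing
    # the species concentration on the left axis and PM25 on a twin right axis.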
    for _, x in enumerate(SNA+OCEC):
        plt.figure(figsize=(22, 18))
        for i, (name, color) in enumerate(zip(['obs','qc','fill'], ['y', 'r', 'b'])):
            ax = plt.subplot(3, 1, i+1)
            if i == 0:
                data = obs_domain
            elif i == 1:
                data = qc_domain
            elif i == 2:
                data = fill_domain
            ax.plot(data['time'], data[x], color=color, label='{}_{}'.format(name, x))
            ax.set_xlabel('Time (h)', fontsize=13)
            ax.set_ylabel('Conc (ug/m3)', fontsize=13)
            ax1 = ax.twinx()
            ax1.plot(data['time'], data['PM25'], color='g', label='PM25')
            ax1.set_ylabel('Conc (ug/m3)', fontsize=13)
            ax.set_title('{}_{}'.format(x, name), 
                    fontsize=14)
            # legends
            ax.legend(fontsize=12, loc='upper left')
            ax1.legend(fontsize=12, loc='upper right')

        plt.savefig(os.path.join(
            savedir, '{}_{}_{}_{}'.format(stat_id, 
                                          x, 
                                          start.strftime("%Y%m%d%H"), 
                                          end.strftime("%Y%m%d%H"))))
def train(model, train_loader, optimizer, criterion, mask, score):
    best_accuracy = 0
    compress = []
    bestacc = []
    loss = []
    acc = []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.train()
    pbar = tqdm(train_loader)
    for i, (imgs, targets) in enumerate(pbar):
        imgs, targets = imgs.to(device), targets.to(device)
        lr = optimizer.param_groups[0]['lr']
        optimizer.zero_grad()

        if args.binarize:
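            # Binarize the weights for this forward pass only: cache the
            # full-precision tensors in W, replace each weight with
            # sign(w) * alpha under the pruning mask M, and restore the
            # cached weights right after the forward pass below.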
            W = []
            cnt = 0
            for name, p in model.named_parameters():
                if 'weight' in name:
                    M = torch.from_numpy(mask[cnt]).to(device)
                    alpha = (torch.sum((p.data * M)**2) / torch.sum(M**2))**0.5
                    W.append(p.clone().detach())
                    p.data.sign_().mul_(M).mul_(alpha)
                    cnt += 1
            output = model(imgs)
            cnt = 0
            for name, p in model.named_parameters():
                if 'weight' in name:
                    p.data.zero_().add_(W[cnt])
                    cnt += 1
        else:
            output = model(imgs)

        train_loss = criterion(output, targets)
        train_loss.backward()
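        # Zero the gradient wherever the weight value is below 1e-6 so pruned
        # weights stay at zero, and accumulate a per-weight score from the
        # learning-rate-scaled gradients.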
        cnt = 0
        for name, p in model.named_parameters():
            if 'weight' in name:
                tensor = p.data.cpu().numpy()
                grad_tensor = p.grad.data.cpu().numpy()
                grad_tensor = np.where(tensor < 1e-6, 0, grad_tensor)
                p.grad.data = torch.from_numpy(grad_tensor).to(device)
                score[cnt] -= lr * p.grad.data
                cnt += 1

        optimizer.step()

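        # When --mini_batch is set, update the pruning mask every step: rank the
        # weights either by the accumulated score or by surviving weight
        # magnitude, zero out the lowest args.prune_percent entries of the mask,
        # and re-apply the mask to the weights.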
        if args.mini_batch:
            cnt = 0
            for name, p in model.named_parameters():
                if 'weight' in name:
                    if args.score:
                        if score[cnt].dim() > 3:
                            sorted, indices = torch.sort(torch.abs(score[cnt]),
                                                         dim=0)
                            tensor = indices.cpu().numpy()
                            percentile_value = np.percentile(
                                tensor, args.prune_percent)
                            mask[cnt] = np.where(tensor < percentile_value, 0,
                                                 mask[cnt])
                            #print(np.count_nonzero(mask[cnt])/np.prod(mask[cnt].shape))
                    else:
                        tensor = p.data.cpu().numpy()
                        alive = tensor[np.nonzero(tensor)]
                        percentile_value = np.percentile(
                            abs(alive), args.prune_percent)
                        mask[cnt] = np.where(
                            abs(tensor) < percentile_value, 0, mask[cnt])
                    p.data = torch.from_numpy(
                        mask[cnt] * p.data.cpu().numpy()).to(p.device)
                    cnt += 1
            #print(utils.print_nonzeros(model))
        comp1 = utils.print_nonzeros(model)
        if i % (len(train_loader) // args.test_freq) == 0:
            loss.append(train_loss.item())

            accuracy = test(model, mask, test_loader, criterion)
            acc.append(accuracy)
            # Save Weights
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                utils.checkdir(
                    f'{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/')
                torch.save(
                    model,
                    f'{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{i}_model.pth.tar'
                )
            #print(f'Train Epoch: {i}/{train_ite} Loss: {train_loss.item():.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%')
            #pbar.set_description(f'Train Epoch: {i}/{train_ite} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%')
            if args.mini_batch:
                compress.append(comp1)
                bestacc.append(best_accuracy)
    utils.checkdir(f'{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/')
    with open(
            f'{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/mask_{comp1}.pkl',
            'wb') as fp:
        pickle.dump(mask, fp)
    if args.mini_batch:
        return loss, acc, compress, bestacc
    else:
        return loss, acc, best_accuracy
Example #18
    def _SaveAllChecked(self):

        completed = 0
        data = self.parent.tree.ImageData

        checkedItemList = []
        self.parent.tree.findCheckedItems(self.parent.tree.invisibleRootItem(), checkedItemList)

        allDim = 0
        self.progressText.emit(self.tr("Data size counting"))
        for expNumItem in checkedItemList:
            allDim += int(utils.num_pattern.findall(expNumItem.text(0))[1])

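        # Export every checked experiment: one sub-folder per experiment (plus a
        # per-experiment-number folder for image export), written either as
        # individual image files or as a single multi-frame file, with progress
        # reported as a percentage of the total number of images.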
        for expNumItem in checkedItemList:
            exp_name = self.parent.tree.getExpNameItem(expNumItem).text(0)
            exp_num = utils.num_pattern.findall(expNumItem.text(0))[0]

            saveDir = os.path.join(self.tmp_folder.name, exp_name)
            utils.checkdir(saveDir)

            if self.saveType == "Image":
                saveDir = os.path.join(saveDir, exp_num)
                utils.checkdir(saveDir)

            if self.saveType != "Image":
                fname = '{0}{1}Experiment_{2}.{3}'.format(saveDir,
                                                          os.sep,
                                                          exp_num,
                                                          self.form)

            img_data = data[exp_name][exp_num]["data"]
            for i in range(img_data.Dimension[0]):
                if self.cancelThread:
                    raise CANCELThread()

                if self.saveType == "Image":
                    fname = '{0}{1}Image_{2}.{3}'.format(saveDir, 
                                                         os.sep, 
                                                         i+1, 
                                                         self.form)
                    self.progressText.emit(
                        self.tr("Writing Image_{0}.{1} to the folder /{2}/{3}").format(
                                                                                i+1,
                                                                                self.form,
                                                                                exp_name,
                                                                                exp_num))
                    toimage(img_data.IntenseData[i,:,:], 
                            cmin=img_data.min_val, cmax=img_data.max_val).save(fname)

                else:    
                    self.progressText.emit(
                        self.tr("Writing Image {0}/{1} to Experiment_{2}.{3}").format(
                                                                            i+1,
                                                                            img_data.Dimension[0],
                                                                            exp_num,
                                                                            self.form))

                    eval("bruker.SingleWriteTo{}File".format(self.saveType))(fname,
                                                                             img_data,
                                                                             i,
                                                                             i==0)

                completed += 100/allDim
                self.progress.emit(completed)
Example #19
def main(args, ITE=0):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    reinit = True if args.prune_type == "reinit" else False
    if args.save_dir:
        utils.checkdir(
            f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.save_dir}/"
        )
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/"
        )
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/"
        )
    else:
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
        utils.checkdir(f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/")

    # Data Loader
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    if args.dataset == "mnist":
        traindataset = datasets.MNIST('../data',
                                      train=True,
                                      download=True,
                                      transform=transform)
        testdataset = datasets.MNIST('../data',
                                     train=False,
                                     transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        traindataset = datasets.CIFAR10('../data',
                                        train=True,
                                        download=True,
                                        transform=transform_train)
        testdataset = datasets.CIFAR10('../data',
                                       train=False,
                                       transform=transform_test)
        from archs.cifar10 import AlexNet, LeNet5, fc1, vgg, resnet, densenet

    elif args.dataset == "fashionmnist":
        traindataset = datasets.FashionMNIST('../data',
                                             train=True,
                                             download=True,
                                             transform=transform)
        testdataset = datasets.FashionMNIST('../data',
                                            train=False,
                                            transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar100":
        traindataset = datasets.CIFAR100('../data',
                                         train=True,
                                         download=True,
                                         transform=transform)
        testdataset = datasets.CIFAR100('../data',
                                        train=False,
                                        transform=transform)
        from archs.cifar100 import AlexNet, fc1, LeNet5, vgg, resnet

    # If you want to add extra datasets paste here

    else:
        print("\nWrong Dataset choice \n")
        exit()

    if args.dataset == "cifar10":
        #trainsampler = torch.utils.data.RandomSampler(traindataset, replacement=True, num_samples=45000)  # 45K train dataset
        #train_loader = torch.utils.data.DataLoader(traindataset, batch_size=args.batch_size, shuffle=False, num_workers=0, drop_last=False, sampler=trainsampler)
        train_loader = torch.utils.data.DataLoader(traindataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=4)
    else:
        train_loader = torch.utils.data.DataLoader(traindataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   drop_last=False)
    #train_loader = cycle(train_loader)
    test_loader = torch.utils.data.DataLoader(testdataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=4)

    # Importing Network Architecture

    # Initialize the Hessian dataloader, default batch_num 1
    for inputs, labels in train_loader:
        hessian_dataloader = (inputs, labels)
        break

    global model
    if args.arch_type == "fc1":
        model = fc1.fc1().to(device)
    elif args.arch_type == "lenet5":
        model = LeNet5.LeNet5().to(device)
    elif args.arch_type == "alexnet":
        model = AlexNet.AlexNet().to(device)
    elif args.arch_type == "vgg16":
        model = vgg.vgg16().to(device)
    elif args.arch_type == "resnet18":
        model = resnet.resnet18().to(device)
    elif args.arch_type == "densenet121":
        model = densenet.densenet121().to(device)
    # If you want to add extra model paste here
    else:
        print("\nWrong Model choice\n")
        exit()

    model = nn.DataParallel(model)
    # Weight Initialization
    model.apply(weight_init)

    # Copying and Saving Initial State
    initial_state_dict = copy.deepcopy(model.state_dict())
    if args.save_dir:
        torch.save(
            model.state_dict(),
            f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.save_dir}/initial_state_dict_{args.prune_type}.pth"
        )
    else:
        torch.save(
            model.state_dict(),
            f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth"
        )

    # global total_params
    total_params = 0
    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())
        total_params += param.numel()

    # Making Initial Mask
    make_mask(model, total_params)

    # Optimizer and Loss
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)
    # warm-up schedule; scheduler_warmup is chained with scheduler_steplr
    scheduler_steplr = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                            milestones=[0, 15],
                                                            gamma=0.1,
                                                            last_epoch=-1)
    if args.warmup:
        scheduler_warmup = GradualWarmupScheduler(
            optimizer,
            multiplier=1,
            total_epoch=50,
            after_scheduler=scheduler_steplr)  # 20K=(idx)56, 35K=70
    criterion = nn.CrossEntropyLoss(
    )  # Default was F.nll_loss; why test, train different?

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    bestacc = 0.0
    best_accuracy = 0
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION, float)
    bestacc = np.zeros(ITERATION, float)
    step = 0
    all_loss = np.zeros(args.end_iter, float)
    all_accuracy = np.zeros(args.end_iter, float)

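    # Iterative lottery-ticket pruning: after the first (unpruned) pass, prune a
    # percentile of the remaining weights, reset the survivors to their initial
    # values (or re-initialize them when prune_type == "reinit"), and retrain.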
    for _ite in range(args.start_iter, ITERATION):
        if not _ite == 0:
            prune_by_percentile(args.prune_percent,
                                resample=resample,
                                reinit=reinit,
                                total_params=total_params,
                                hessian_aware=args.hessian,
                                criterion=criterion,
                                dataloader=hessian_dataloader,
                                cuda=torch.cuda.is_available())
            if reinit:
                model.apply(weight_init)
                #if args.arch_type == "fc1":
                #    model = fc1.fc1().to(device)
                #elif args.arch_type == "lenet5":
                #    model = LeNet5.LeNet5().to(device)
                #elif args.arch_type == "alexnet":
                #    model = AlexNet.AlexNet().to(device)
                #elif args.arch_type == "vgg16":
                #    model = vgg.vgg16().to(device)
                #elif args.arch_type == "resnet18":
                #    model = resnet.resnet18().to(device)
                #elif args.arch_type == "densenet121":
                #    model = densenet.densenet121().to(device)
                #else:
                #    print("\nWrong Model choice\n")
                #    exit()
                step = 0
                for name, param in model.named_parameters():
                    if 'weight' in name:
                        param_frac = param.numel() / total_params
                        if param_frac > 0.01:
                            weight_dev = param.device
                            param.data = torch.from_numpy(
                                param.data.cpu().numpy() *
                                mask[step]).to(weight_dev)
                            step = step + 1
                step = 0
            else:
                original_initialization(mask, initial_state_dict, total_params)
            # optimizer = torch.optim.SGD([{'params': model.parameters(), 'initial_lr': 0.03}], lr=args.lr, momentum=0.9, weight_decay=1e-4)
            # scheduler_steplr = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[0, 14], gamma=0.1, last_epoch=-1)
            # scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=56, after_scheduler=scheduler_steplr)  # 20K=(idx)56, 35K=70
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")

        # Optimizer and Loss
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=0.9,
                                    weight_decay=1e-4)
        # warm-up schedule; scheduler_warmup is chained with scheduler_steplr
        scheduler_steplr = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[0, 15], gamma=0.1, last_epoch=-1)
        if args.warmup:
            scheduler_warmup = GradualWarmupScheduler(
                optimizer,
                multiplier=1,
                total_epoch=50,
                after_scheduler=scheduler_steplr)  # 20K=(idx)56, 35K=70

        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1
        pbar = tqdm(range(args.end_iter))  # process bar

        for iter_ in pbar:

            # Frequency for Testing
            if iter_ % args.valid_freq == 0:
                accuracy = test(model, test_loader, criterion)

                # Save Weights for each _ite
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    if args.save_dir:
                        torch.save(
                            model.state_dict(),
                            f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.save_dir}/{_ite}_model_{args.prune_type}.pth"
                        )
                    else:
                        # torch.save(model,f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth")
                        torch.save(
                            model.state_dict(),
                            f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth"
                        )

            # Training
            loss = train(model, train_loader, optimizer, criterion,
                         total_params)
            all_loss[iter_] = loss
            all_accuracy[iter_] = accuracy

            # warm up
            if args.warmup:
                scheduler_warmup.step()
            _lr = optimizer.param_groups[0]['lr']

            # Save the model during training
            if args.save_freq > 0 and iter_ % args.save_freq == 0:
                if args.save_dir:
                    torch.save(
                        model.state_dict(),
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.save_dir}/{_ite}_model_{args.prune_type}_epoch{iter_}.pth"
                    )
                else:
                    torch.save(
                        model.state_dict(),
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}_epoch{iter_}.pth"
                    )

            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}% Learning Rate: {_lr:.6f}%'
                )

        writer.add_scalar('Accuracy/test', best_accuracy, comp1)
        bestacc[_ite] = best_accuracy

        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 100 * (all_loss - np.min(all_loss)) /
                 np.ptp(all_loss).astype(float),
                 c="blue",
                 label="Loss")
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 all_accuracy,
                 c="red",
                 label="Accuracy")
        plt.title(
            f"Loss Vs Accuracy Vs Iterations ({args.dataset},{args.arch_type})"
        )
        plt.xlabel("Iterations")
        plt.ylabel("Loss and Accuracy")
        plt.legend()
        plt.grid(color="gray")
        if args.save_dir:
            plt.savefig(
                f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_LossVsAccuracy_{comp1}.png",
                dpi=1200)
        else:
            plt.savefig(
                f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png",
                dpi=1200)
        plt.close()

        # Dump Plot values
        if args.save_dir:
            all_loss.dump(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_all_loss_{comp1}.dat"
            )
            all_accuracy.dump(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_all_accuracy_{comp1}.dat"
            )
        else:
            all_loss.dump(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_loss_{comp1}.dat"
            )
            all_accuracy.dump(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_accuracy_{comp1}.dat"
            )

        # Dumping mask
        if args.save_dir:
            with open(
                    f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_mask_{comp1}.pkl",
                    'wb') as fp:
                pickle.dump(mask, fp)
        else:
            with open(
                    f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_mask_{comp1}.pkl",
                    'wb') as fp:
                pickle.dump(mask, fp)

        # Making variables into 0
        best_accuracy = 0
        all_loss = np.zeros(args.end_iter, float)
        all_accuracy = np.zeros(args.end_iter, float)

    # Dumping Values for Plotting
    if args.save_dir:
        comp.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_compression.dat"
        )
        bestacc.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_bestaccuracy.dat"
        )
    else:
        comp.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_compression.dat"
        )
        bestacc.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_bestaccuracy.dat"
        )
    # Plotting
    a = np.arange(args.prune_iterations)
    plt.plot(a, bestacc, c="blue", label="Winning tickets")
    plt.title(
        f"Test Accuracy vs Unpruned Weights Percentage ({args.dataset},{args.arch_type})"
    )
    plt.xlabel("Unpruned Weights Percentage")
    plt.ylabel("test accuracy")
    plt.xticks(a, comp, rotation="vertical")
    plt.ylim(0, 100)
    plt.legend()
    plt.grid(color="gray")
    if args.save_dir:
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.save_dir}/{args.prune_type}_AccuracyVsWeights.png",
            dpi=1200)
    else:
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_AccuracyVsWeights.png",
            dpi=1200)
    plt.close()
Example #20
def main(args, ITE=0):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    reinit = True if args.prune_type == "reinit" else False

    # Data Loader
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    if args.dataset == "mnist":
        traindataset = datasets.MNIST('../data',
                                      train=True,
                                      download=True,
                                      transform=transform)
        testdataset = datasets.MNIST('../data',
                                     train=False,
                                     transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar10":
        traindataset = datasets.CIFAR10('../data',
                                        train=True,
                                        download=True,
                                        transform=transform)
        testdataset = datasets.CIFAR10('../data',
                                       train=False,
                                       transform=transform)
        from archs.cifar10 import AlexNet, LeNet5, fc1, vgg, resnet, densenet

    elif args.dataset == "fashionmnist":
        traindataset = datasets.FashionMNIST('../data',
                                             train=True,
                                             download=True,
                                             transform=transform)
        testdataset = datasets.FashionMNIST('../data',
                                            train=False,
                                            transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar100":
        traindataset = datasets.CIFAR100('../data',
                                         train=True,
                                         download=True,
                                         transform=transform)
        testdataset = datasets.CIFAR100('../data',
                                        train=False,
                                        transform=transform)
        from archs.cifar100 import AlexNet, fc1, LeNet5, vgg, resnet

    # If you want to add extra datasets paste here

    else:
        print("\nWrong Dataset choice \n")
        exit()

    train_loader = torch.utils.data.DataLoader(traindataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0,
                                               drop_last=False)
    #train_loader = cycle(train_loader)
    test_loader = torch.utils.data.DataLoader(testdataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0,
                                              drop_last=True)

    # Importing Network Architecture
    global model
    if args.arch_type == "fc1":
        model = fc1.fc1().to(device)
    elif args.arch_type == "lenet5":
        model = LeNet5.LeNet5().to(device)
    elif args.arch_type == "alexnet":
        model = AlexNet.AlexNet().to(device)
    elif args.arch_type == "vgg16":
        model = vgg.vgg16().to(device)
    elif args.arch_type == "resnet18":
        model = resnet.resnet18().to(device)
    elif args.arch_type == "densenet121":
        model = densenet.densenet121().to(device)
    # If you want to add extra model paste here
    else:
        print("\nWrong Model choice\n")
        exit()

    # Weight Initialization
    model.apply(weight_init)

    # Copying and Saving Initial State
    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/")
    torch.save(
        model,
        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth.tar"
    )

    # Making Initial Mask
    make_mask(model)

    # Optimizer and Loss
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()  # Default was F.nll_loss

    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    bestacc = 0.0
    best_accuracy = 0
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION, float)
    bestacc = np.zeros(ITERATION, float)
    step = 0
    all_loss = np.zeros(args.end_iter, float)
    all_accuracy = np.zeros(args.end_iter, float)

    for _ite in range(args.start_iter, ITERATION):
        if not _ite == 0:
            prune_by_percentile(args.prune_percent,
                                resample=resample,
                                reinit=reinit)
            if reinit:
                model.apply(weight_init)
                #if args.arch_type == "fc1":
                #    model = fc1.fc1().to(device)
                #elif args.arch_type == "lenet5":
                #    model = LeNet5.LeNet5().to(device)
                #elif args.arch_type == "alexnet":
                #    model = AlexNet.AlexNet().to(device)
                #elif args.arch_type == "vgg16":
                #    model = vgg.vgg16().to(device)
                #elif args.arch_type == "resnet18":
                #    model = resnet.resnet18().to(device)
                #elif args.arch_type == "densenet121":
                #    model = densenet.densenet121().to(device)
                #else:
                #    print("\nWrong Model choice\n")
                #    exit()
                step = 0
                for name, param in model.named_parameters():
                    if 'weight' in name:
                        weight_dev = param.device
                        param.data = torch.from_numpy(
                            param.data.cpu().numpy() *
                            mask[step]).to(weight_dev)
                        step = step + 1
                step = 0
            else:
                original_initialization(mask, initial_state_dict)
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=1e-4)
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")

        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1
        pbar = tqdm(range(args.end_iter))

        for iter_ in pbar:

            # Frequency for Testing
            if iter_ % args.valid_freq == 0:
                accuracy = test(model, test_loader, criterion)

                # Save Weights
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    utils.checkdir(
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/"
                    )
                    torch.save(
                        model,
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth.tar"
                    )

            # Training
            if _ite == 0:
                loss = train(model, train_loader, optimizer, criterion)
                # TODO: needs to be completed
                #teacher_model = ...
            else:
                loss = train_with_distill(model, train_loader, optimizer,
                                          teacher_model)

            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%'
                )

        writer.add_scalar('Accuracy/test', best_accuracy, comp1)
        bestacc[_ite] = best_accuracy

        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 100 * (all_loss - np.min(all_loss)) /
                 np.ptp(all_loss).astype(float),
                 c="blue",
                 label="Loss")
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 all_accuracy,
                 c="red",
                 label="Accuracy")
        plt.title(
            f"Loss Vs Accuracy Vs Iterations ({args.dataset},{args.arch_type})"
        )
        plt.xlabel("Iterations")
        plt.ylabel("Loss and Accuracy")
        plt.legend()
        plt.grid(color="gray")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png",
            dpi=1200)
        plt.close()

        # Dump Plot values
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        all_loss.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_loss_{comp1}.dat"
        )
        all_accuracy.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_accuracy_{comp1}.dat"
        )

        # Dumping mask
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        with open(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_mask_{comp1}.pkl",
                'wb') as fp:
            pickle.dump(mask, fp)

        # Making variables into 0
        best_accuracy = 0
        all_loss = np.zeros(args.end_iter, float)
        all_accuracy = np.zeros(args.end_iter, float)

    # Dumping Values for Plotting
    utils.checkdir(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
    comp.dump(
        f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_compression.dat"
    )
    bestacc.dump(
        f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_bestaccuracy.dat"
    )

    # Plotting
    a = np.arange(args.prune_iterations)
    plt.plot(a, bestacc, c="blue", label="Winning tickets")
    plt.title(
        f"Test Accuracy vs Unpruned Weights Percentage ({args.dataset},{args.arch_type})"
    )
    plt.xlabel("Unpruned Weights Percentage")
    plt.ylabel("test accuracy")
    plt.xticks(a, comp, rotation="vertical")
    plt.ylim(0, 100)
    plt.legend()
    plt.grid(color="gray")
    utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
    plt.savefig(
        f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_AccuracyVsWeights.png",
        dpi=1200)
    plt.close()
Example #21
def main(args, ITE=0):
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cpu")

    reinit = True if args.prune_type == "reinit" else False

    # Data Loader
    # Change data here
    if args.dataset == 'LV':
        # 1

        X0 = torch.tensor([10., 5.])
        theta = [1.0, 0.1, 1.5, 0.75]
        datfunc = Dat.LotkaVolterra(theta)

        t_train = torch.linspace(0., 25., args.train_size)
        t_eval = torch.linspace(0., 100., args.eval_size)
        t_test = torch.linspace(0, 200, args.test_size)

    elif args.dataset == 'FHN':
        #2

        X0 = torch.tensor([-1.0, 1.0])
        theta = [0.2, 0.2, 3.0]
        datfunc = Dat.FHN(theta)

        t_train = torch.linspace(0., 25., args.train_size)
        t_eval = torch.linspace(0., 100., args.eval_size)
        t_test = torch.linspace(0, 200, args.test_size)

    elif args.dataset == 'Lorenz63':
        #3

        X0 = torch.tensor([1.0, 1.0, 1.0])
        theta = [10.0, 28.0, 8.0 / 3.0]
        datfunc = Dat.Lorenz63(theta)

        t_train = torch.linspace(
            0., 25.,
            args.train_size)  # Need to ask about extents for test case Lorenz
        t_eval = torch.linspace(0., 50., args.eval_size)
        t_test = torch.linspace(0., 100., args.test_size)

    # Need X0 and parameters
    # elif args.dataset == 'Lorenz96':
    # 4
    #     X0 = torch.tensor([])
    #     theta =
    #     datfunc = Lorenz96(theta)

    elif args.dataset == 'ChemicalReactionSimple':
        #5
        X0 = torch.tensor([1., 1.])
        theta = [.5, .8, .4]
        datfunc = Dat.ChemicalReactionSimple(theta)

        t_train = torch.linspace(0., 25., args.train_size)
        t_eval = torch.linspace(0., 100., args.eval_size)
        t_test = torch.linspace(0, 200, args.test_size)

    elif args.dataset == 'Chemostat':
        #6
        X0 = torch.tensor([1., 2., 3., 4., 5., 6., 10.])

        Cetas = np.linspace(2., 3., 6, dtype=float)
        VMs = np.linspace(1., 2., 6, dtype=float)
        KMs = np.ones(6, dtype=float)

        theta = np.squeeze(
            np.concatenate([
                Cetas.reshape([1, -1]),
                VMs.reshape([1, -1]),
                KMs.reshape([1, -1])
            ],
                           axis=1))
        flowrate = 2.
        feedConc = 3.
        datfunc = Dat.Chemostat(6, flowrate, feedConc, theta)

        t_train = torch.linspace(0., 1.,
                                 args.train_size)  # Ask about the extent here
        t_eval = torch.linspace(0., 2., args.eval_size)
        t_test = torch.linspace(0, 5, args.test_size)

    elif args.dataset == 'Clock':
        #7
        X0 = torch.tensor([1, 1.2, 1.9, .3, .8, .98, .8])
        theta = np.asarray([
            .8, .05, 1.2, 1.5, 1.4, .13, 1.5, .33, .18, .26, .28, .5, .089,
            .52, 2.1, .052, .72
        ])
        datfunc = Dat.Clock(theta)

        t_train = torch.linspace(0., 5., args.train_size)
        t_eval = torch.linspace(0., 10., args.eval_size)
        t_test = torch.linspace(0, 20, args.test_size)

    elif args.dataset == 'ProteinTransduction':
        #8
        X0 = torch.tensor([1., 0., 1., 0., 0.])
        theta = [0.07, 0.6, 0.05, 0.3, 0.017, 0.3]
        datfunc = Dat.ProteinTransduction(theta)
        t_train = torch.linspace(0., 25., args.train_size)
        t_eval = torch.linspace(0., 100., args.eval_size)
        t_test = torch.linspace(0, 200, args.test_size)

    X_train = Dat.generate_data(datfunc,
                                X0,
                                t_train,
                                method=args.integrate_method)
    X_eval = Dat.generate_data(datfunc,
                               X0,
                               t_eval,
                               method=args.integrate_method)
    X_test = Dat.generate_data(datfunc,
                               X0,
                               t_test,
                               method=args.integrate_method)

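    # Ground-truth trajectories and their time derivatives (the ODE right-hand
    # side evaluated along each trajectory) for the train, eval, and test grids.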
    dx_dt_train = datfunc(t=None, x=X_train.numpy().T)
    dx_dt_eval = datfunc(t=None, x=X_eval.numpy().T)
    dx_dt_test = datfunc(t=None, x=X_test.numpy().T)

    train_queue = (X_train, dx_dt_train.T)

    valid_queue = (X_eval, dx_dt_eval.T)

    # Importing Network Architecture (only one model is loaded here)
    global model
    if args.arch_type == 'network':
        model = network.fc1().to(device)
    # If you want to add an extra model, add it here
    else:
        print("\nWrong Model choice\n")
        exit()

    # Weight Initialization
    model.apply(weight_init)

    # Copying and Saving Initial State
    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/")
    torch.save(
        model,
        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth.tar"
    )

    # Making Initial Mask
    make_mask(model)

    # Optimizer and Loss
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)
    # criterion = nn.CrossEntropyLoss() # Default was F.nll_loss
    # Change loss to MSE
    criterion = nn.MSELoss()

    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    # NOTE 'accuracy' here is an error metric returned by test(), so lower is better
    best_accuracy = 0.3
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION, float)  # remaining-weight percentage at each pruning level
    bestacc = np.zeros(ITERATION, float)  # best validation error at each pruning level
    step = 0
    all_loss = np.zeros(args.end_iter, float)
    all_accuracy = np.zeros(args.end_iter, float)

    plt.ion()
    fig, ax = plt.subplots(figsize=(20, 20))
    for _ite in range(args.start_iter, ITERATION):
        if not _ite == 0:
            prune_by_percentile(args.prune_percent,
                                resample=resample,
                                reinit=reinit)
            if reinit:
                model.apply(weight_init)
                #if args.arch_type == "fc1":
                #    model = fc1.fc1().to(device)
                #elif args.arch_type == "lenet5":
                #    model = LeNet5.LeNet5().to(device)
                #elif args.arch_type == "alexnet":
                #    model = AlexNet.AlexNet().to(device)
                #elif args.arch_type == "vgg16":
                #    model = vgg.vgg16().to(device)
                #elif args.arch_type == "resnet18":
                #    model = resnet.resnet18().to(device)
                #elif args.arch_type == "densenet121":
                #    model = densenet.densenet121().to(device)
                #else:
                #    print("\nWrong Model choice\n")
                #    exit()
                step = 0
                for name, param in model.named_parameters():
                    if 'weight' in name:
                        weight_dev = param.device
                        param.data = torch.from_numpy(
                            param.data.cpu().numpy() *
                            mask[step]).to(weight_dev)
                        step = step + 1
                step = 0
            else:
                original_initialization(mask, initial_state_dict)
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=1e-4)
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")

        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1

        pbar = tqdm(range(args.end_iter))
        for iter_ in pbar:

            # Frequency for Testing
            if iter_ % args.valid_freq == 0:
                accuracy = test(model, valid_queue, criterion, t_eval, ax)

                # Save Weights
                if accuracy < best_accuracy:
                    best_accuracy = accuracy
                    utils.checkdir(
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/"
                    )
                    torch.save(
                        model,
                        f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pt"
                    )

            # Training
            loss = train(model, train_queue, optimizer, criterion)
            all_loss[iter_] = loss
            all_accuracy[iter_] = accuracy

            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%'
                )
            plt.draw()
            plt.pause(0.1)
        writer.add_scalar('Accuracy/test', best_accuracy, comp1)
        bestacc[_ite] = best_accuracy
        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        # plt.plot(np.arange(1,(args.end_iter)+1), 100*(all_loss - np.min(all_loss))/np.ptp(all_loss).astype(float), c="blue", label="Loss")
        # plt.plot(np.arange(1,(args.end_iter)+1), all_accuracy, c="red", label="Accuracy")
        # plt.title(f"Loss Vs Accuracy Vs Iterations ({args.dataset},{args.arch_type})")
        # plt.xlabel("Iterations")
        # plt.ylabel("Loss and Accuracy")
        # plt.legend()
        # plt.grid(color="gray")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
        # plt.savefig(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png", dpi=1200)
        # plt.close()

        # Dump Plot values
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        all_loss.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_loss_{comp1}.dat"
        )
        all_accuracy.dump(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_accuracy_{comp1}.dat"
        )

        # Dumping mask
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        with open(
                f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_mask_{comp1}.pkl",
                'wb') as fp:
            pickle.dump(mask, fp)

        # Reset accumulators for the next pruning level
        best_accuracy = 0.3  # reset the save threshold (lower error is better in this example)
        all_loss = np.zeros(args.end_iter, float)
        all_accuracy = np.zeros(args.end_iter, float)

    # Dumping Values for Plotting
    utils.checkdir(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
    comp.dump(
        f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_compression.dat"
    )
    bestacc.dump(
        f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_bestaccuracy.dat"
    )

    # Plotting
    plt.ioff()
    plt.show()
    a = np.arange(args.prune_iterations)
    # plt.plot(a, bestacc, c="blue", label="Winning tickets")
    # plt.title(f"Test Accuracy vs Unpruned Weights Percentage ({args.dataset},{args.arch_type})")
    # plt.xlabel("Unpruned Weights Percentage")
    # plt.ylabel("test accuracy")
    # plt.xticks(a, comp, rotation ="vertical")
    # plt.ylim(0,100)
    # plt.legend()
    # plt.grid(color="gray")
    utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
Beispiel #22
0
def main(args, ITE=0):
    reinit = True if args.prune_type == "reinit" else False

    traindataset, testdataset = get_split(args.dataset)

    train_loader = torch.utils.data.DataLoader(traindataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               drop_last=False)
    test_loader = torch.utils.data.DataLoader(testdataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              drop_last=True)
    '''
    ### testing whether cifar10 is getting normalized correctly
    mean = 0.0
    for images, _ in train_loader:
        batch_samples = images.size(0) 
        images = images.view(batch_samples, images.size(1), -1)
        # print(images[0]);exit()
        mean += images.mean(2).sum(0)
    mean = mean / len(train_loader.dataset)

    var = 0.0
    for images, _ in train_loader:
        batch_samples = images.size(0)
        images = images.view(batch_samples, images.size(1), -1)
        var += ((images - mean.unsqueeze(1))**2).sum([0,2])
    std = torch.sqrt(var / (len(train_loader.dataset)*images.size(-1)))
    
    print("mean, std: ", mean, std)
    exit()
    '''

    # Importing Network Architecture
    global model
    model = get_model(args.arch_type)

    # Weight Initialization
    model.apply(weight_init)

    # Copying and Saving Initial State
    initial_state_dict = copy.deepcopy(model.state_dict())
    tar_dir = f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.exp_name}/"
    utils.checkdir(tar_dir)
    torch.save(model,
               tar_dir + f"initial_state_dict_{args.prune_type}.pth.tar")

    if not args.rlt:
        # Making Initial Mask
        make_mask(model, None)

    # Optimizer and Loss
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()  # Default was F.nll_loss

    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    bestacc = 0.0
    best_accuracy = 0
    dump_dir = f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{args.exp_name}/"
    utils.checkdir(dump_dir)
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION, float)
    bestacc = np.zeros(ITERATION, float)
    step = 0
    all_loss = np.zeros(args.end_iter, float)
    all_accuracy = np.zeros(args.end_iter, float)

    for _ite in range(args.start_iter, ITERATION):

        # random lottery ticket
        if args.rlt:
            # percent of weights to prune
            percent = 1 - ((1 - args.prune_percent / 100)**_ite)
            make_mask(model, percent)
            # same original initialized weights, with different random masks
            original_initialization(mask, initial_state_dict)
        else:
            # first net is unpruned!
            if _ite != 0:
                prune_by_percentile(args.prune_percent, reinit=reinit)
                if reinit:
                    model.apply(weight_init)
                    step = 0
                    for name, param in model.named_parameters():
                        if 'weight' in name and 'classifier' in name:
                            weight_dev = param.device
                            param.data = torch.from_numpy(
                                param.data.cpu().numpy() *
                                mask[step]).to(weight_dev)
                            step = step + 1
                    step = 0
                else:
                    original_initialization(mask, initial_state_dict)

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=1e-4)
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")
        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1
        pbar = tqdm(range(args.end_iter))

        for iter_ in pbar:

            # Frequency for Testing
            if iter_ % args.valid_freq == 0:
                accuracy = test(model, test_loader)

                # Save Weights
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    torch.save(
                        model,
                        tar_dir + f"{_ite}_model_{args.prune_type}.pth.tar")

            # Training
            loss = train(model, train_loader, optimizer, criterion)
            all_loss[iter_] = loss
            all_accuracy[iter_] = accuracy

            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%'
                )

        bestacc[_ite] = best_accuracy

        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 100 * (all_loss - np.min(all_loss)) /
                 np.ptp(all_loss).astype(float),
                 c="blue",
                 label="Loss")
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 all_accuracy,
                 c="red",
                 label="Accuracy")
        plt.title(
            f"Loss Vs Accuracy Vs Iterations ({args.dataset},{args.arch_type})"
        )
        plt.xlabel("Iterations")
        plt.ylabel("Loss and Accuracy")
        plt.legend()
        plt.grid(color="gray")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png",
            dpi=1200)
        plt.close()

        # Dump Plot values
        dump_dir = f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.exp_name}/"
        utils.checkdir(dump_dir)
        all_loss.dump(dump_dir + f"{args.prune_type}_all_loss_{comp1}.dat")
        all_accuracy.dump(dump_dir +
                          f"{args.prune_type}_all_accuracy_{comp1}.dat")

        # Dumping mask
        with open(dump_dir + f"{args.prune_type}_mask_{comp1}.pkl",
                  'wb') as fp:
            pickle.dump(mask, fp)

        # Reset accumulators for the next pruning level
        best_accuracy = 0
        all_loss = np.zeros(args.end_iter, float)
        all_accuracy = np.zeros(args.end_iter, float)

    # Dumping Values for Plotting
    comp.dump(dump_dir + f"{args.prune_type}_compression.dat")
    bestacc.dump(dump_dir + f"{args.prune_type}_bestaccuracy.dat")

    # Plotting
    a = np.arange(args.prune_iterations)
    plt.plot(a, bestacc, c="blue", label="Winning tickets")
    plt.title(
        f"Test Accuracy vs Unpruned Weights Percentage ({args.dataset},{args.arch_type})"
    )
    plt.xlabel("Unpruned Weights Percentage")
    plt.ylabel("test accuracy")
    plt.xticks(a, comp, rotation="vertical")
    plt.ylim(0, 100)
    plt.legend()
    plt.grid(color="gray")
    utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
    plt.savefig(
        f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_AccuracyVsWeights.png",
        dpi=1200)
    plt.close()
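Every example relies on utils.checkdir before saving and on utils.print_nonzeros to measure compression (its return value ends up in comp[_ite] and in the "Unpruned Weights Percentage" axis). A minimal sketch of what these two helpers presumably look like is given below; treat it as an assumption about the repository's utils module rather than its actual source.

# Hedged sketch of the two utils helpers these scripts depend on.
import os
import numpy as np

def checkdir(directory):
    # Create the directory (and any parents) if it does not already exist.
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)

def print_nonzeros(model):
    # Print a per-layer table of surviving weights and return the percentage
    # of weights that are still nonzero (the value stored in comp[_ite]).
    nonzero = total = 0
    for name, p in model.named_parameters():
        tensor = p.data.cpu().numpy()
        nz_count = np.count_nonzero(tensor)
        total_params = tensor.size
        nonzero += nz_count
        total += total_params
        print(f'{name:25} | nonzeros = {nz_count:8} / {total_params:8} '
              f'({100 * nz_count / total_params:6.2f}%)')
    print(f'alive: {nonzero}, pruned: {total - nonzero}, total: {total}, '
          f'compression: {total / nonzero:10.2f}x')
    return round(100 * nonzero / total, 1)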
Beispiel #23
0
def main(args, ITE=0):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Data Loader
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    if args.dataset == "mnist":
        traindataset = datasets.MNIST('../data',
                                      train=True,
                                      download=True,
                                      transform=transform)
        testdataset = datasets.MNIST('../data',
                                     train=False,
                                     transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc3, fc5, vgg, resnet

    elif args.dataset == "cifar10":
        traindataset = datasets.CIFAR10('../data',
                                        train=True,
                                        download=True,
                                        transform=transform)
        testdataset = datasets.CIFAR10('../data',
                                       train=False,
                                       transform=transform)
        from archs.cifar10 import AlexNet, LeNet5, fc1, vgg, resnet, densenet

    elif args.dataset == "fashionmnist":
        traindataset = datasets.FashionMNIST('../data',
                                             train=True,
                                             download=True,
                                             transform=transform)
        testdataset = datasets.FashionMNIST('../data',
                                            train=False,
                                            transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar100":
        traindataset = datasets.CIFAR100('../data',
                                         train=True,
                                         download=True,
                                         transform=transform)
        testdataset = datasets.CIFAR100('../data',
                                        train=False,
                                        transform=transform)
        from archs.cifar100 import AlexNet, fc1, LeNet5, vgg, resnet

    else:
        print("\nWrong Dataset choice \n")
        exit()

    train_loader = torch.utils.data.DataLoader(traindataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0,
                                               drop_last=False)
    #train_loader = cycle(train_loader)
    test_loader = torch.utils.data.DataLoader(testdataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0,
                                              drop_last=True)

    model = fc5.fc5().to(device)  # NOTE: the architecture is hard-coded here; args.arch_type is only used in the save paths
    model.apply(weight_init)

    # Copying and Saving Initial State
    output_dir = args.output_dir if args.output_dir else f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/"

    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(output_dir)
    torch.save(
        initial_state_dict,
        os.path.join(output_dir,
                     f"initial_state_dict_{args.prune_type}.pth.tar"))
    optimizer = torch.optim.Adam(model.parameters(),
                                 weight_decay=args.weight_decay)
    criterion = nn.CrossEntropyLoss()

    # Pruning
    # NOTE First Pruning Iteration is of No Compression

    dump_dir = args.dump_dir if args.dump_dir else f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/"

    for _ite in range(args.start_iter, args.prune_iterations + 1):
        best_accuracy = 0
        if not _ite == 0:
            utils.pruning_generate(model, 0.2, method=args.pruning_method)
            model_state_dict = model.state_dict()
            model_orig_weight = utils.rewind_weight(initial_state_dict,
                                                    model_state_dict.keys())
            model_state_dict.update(model_orig_weight)
            model.load_state_dict(model_state_dict)
            torch.save(
                model.state_dict(),
                os.path.join(output_dir,
                             f"{_ite}_model_init_{args.prune_type}.pth.tar"))
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=args.weight_decay)
        pbar = tqdm(range(args.end_iter))
        for iter_ in pbar:
            if iter_ % args.valid_freq == 0:
                if _ite > 0:
                    print(model.classifier[8].weight_orig[0, :5])
                    print(model.classifier[8].weight_mask[0, :5])
                else:
                    print(model.classifier[8].weight[0, :5])
                accuracy = test(model, test_loader, criterion)
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    utils.checkdir(output_dir)
                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            output_dir,
                            f"{_ite}_model_{args.prune_type}.pth.tar"))
            # Training
            if not args.pruning_method == 'random':
                loss = train(model, train_loader, optimizer, criterion)
            else:
                loss = 100  # random-pruning baseline is not trained; placeholder loss value

            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%'
                )
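Beispiel #23 prunes through utils.pruning_generate and rewinds through utils.rewind_weight, and its validation loop prints model.classifier[8].weight_orig and weight_mask, attributes that torch.nn.utils.prune attaches to pruned modules. The sketch below therefore guesses that the helpers wrap global unstructured pruning plus a key-remapping rewind; both function bodies are assumptions, not the original code.

# Hedged sketch: what utils.pruning_generate / utils.rewind_weight might do.
import torch.nn as nn
import torch.nn.utils.prune as prune

def pruning_generate(model, px, method='l1'):
    # Prune a fraction `px` (e.g. 0.2) of all Conv2d/Linear weights globally.
    # torch.nn.utils.prune adds `weight_orig` and `weight_mask` to each pruned
    # module, which is what Beispiel #23 inspects during validation.
    parameters_to_prune = [(m, 'weight') for m in model.modules()
                           if isinstance(m, (nn.Conv2d, nn.Linear))]
    pruning_method = prune.RandomUnstructured if method == 'random' else prune.L1Unstructured
    prune.global_unstructured(parameters_to_prune,
                              pruning_method=pruning_method,
                              amount=px)

def rewind_weight(initial_state_dict, target_keys):
    # Map the saved initial weights onto the renamed `*_orig` keys so that
    # load_state_dict() rewinds the surviving weights but keeps the masks.
    rewound = {}
    for key in target_keys:
        if key.endswith('weight_orig'):
            rewound[key] = initial_state_dict[key.replace('weight_orig', 'weight')]
        elif 'weight_mask' not in key:
            rewound[key] = initial_state_dict[key]
    return rewound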
Beispiel #24
0
def main(args, ITE=0):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    reinit = True if args.prune_type=="reinit" else False

    # load the dataset

    MAX_LEN = 60  # NOTE: we filter out a lot of sentences for speed
    train_data, valid_data, test_data = text_datasets.IWSLT.splits(exts=('.de', '.en'),
                                        fields=(SRC, TRG),
                                        filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
    MIN_FREQ = 2  # NOTE: we limit the vocabulary to frequent words for speed
    SRC.build_vocab(train_data.src, min_freq=MIN_FREQ)
    TRG.build_vocab(train_data.trg, min_freq=MIN_FREQ)

    PAD_INDEX = TRG.vocab.stoi[PAD_TOKEN]

    print_data_info(train_data, valid_data, test_data, SRC, TRG)

    train_iter = data.BucketIterator(train_data, batch_size=64, train=True, 
                                 sort_within_batch=True, 
                                 sort_key=lambda x: (len(x.src), len(x.trg)), repeat=False,
                                 device=DEVICE)
    valid_iter = data.Iterator(valid_data, batch_size=1, train=False, sort=False, repeat=False, device=DEVICE)

    # Importing Network Architecture
    global model

    model = make_model(len(SRC.vocab), len(TRG.vocab),
                   emb_size=256, hidden_size=256,
                   num_layers=1, dropout=0.2)

    print(isinstance(model,nn.Linear))
    # Weight Initialization
    model.apply(weight_init)

    # Copying and Saving Initial State
    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/")
    torch.save(model, f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth.tar")

    # Making Initial Mask
    make_mask(model)

    # Optimizer and Loss
    optim = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.NLLLoss(reduction="sum", ignore_index=PAD_INDEX)

    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    bestacc = 0.0
    best_accuracy = 0
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION,float)
    bestacc = np.zeros(ITERATION,float)
    step = 0
    all_loss = np.zeros(args.end_iter,float)
    all_accuracy = np.zeros(args.end_iter,float)


    for _ite in range(args.start_iter, ITERATION):
        if not _ite == 0:
            prune_by_percentile(args.prune_percent, resample=resample, reinit=reinit)
            if reinit:
                model.apply(weight_init)
                step = 0
                for name, param in model.named_parameters():
                    if 'weight' in name:
                        weight_dev = param.device
                        param.data = torch.from_numpy(param.data.cpu().numpy() * mask[step]).to(weight_dev)
                        step = step + 1
                step = 0
            else:
                original_initialization(mask, initial_state_dict)
            optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")

        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1
        pbar = tqdm(range(args.end_iter))

        for iter_ in pbar:

            # Training
            loss = train(model, pad_index=PAD_INDEX,train_iter=train_iter,valid_iter=valid_iter,args=args)
            accuracy = test(model,valid_iter=valid_iter,pad_index=PAD_INDEX)
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                utils.checkdir(f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/")
                torch.save(model,f"{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth.tar")
            all_loss[iter_] = loss
            all_accuracy[iter_] = accuracy
            
            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                pbar.set_description(
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%')       

        writer.add_scalar('Accuracy/test', best_accuracy, comp1)
        bestacc[_ite]=best_accuracy

        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        # plt.plot(np.arange(1,(args.end_iter)+1), 100*(all_loss - np.min(all_loss))/np.ptp(all_loss).astype(float), c="blue", label="Loss") 
        # plt.plot(np.arange(1,(args.end_iter)+1), all_accuracy, c="red", label="Accuracy") 
        # plt.title(f"Loss Vs Accuracy Vs Iterations ({args.dataset},{args.arch_type})") 
        # plt.xlabel("Iterations") 
        # plt.ylabel("Loss and Accuracy") 
        # plt.legend() 
        # plt.grid(color="gray") 
        # utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
        # plt.savefig(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png", dpi=1200) 
        # plt.close()

        # Dump Plot values
        utils.checkdir(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        all_loss.dump(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_loss_{comp1}.dat")
        all_accuracy.dump(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_all_accuracy_{comp1}.dat")
        
        # Dumping mask
        utils.checkdir(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
        with open(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_mask_{comp1}.pkl", 'wb') as fp:
            pickle.dump(mask, fp)
        
        # Reset accumulators for the next pruning level
        best_accuracy = 0
        all_loss = np.zeros(args.end_iter,float)
        all_accuracy = np.zeros(args.end_iter,float)

    # Dumping Values for Plotting
    utils.checkdir(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/")
    comp.dump(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_compression.dat")
    bestacc.dump(f"{os.getcwd()}/dumps/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_bestaccuracy.dat")

    # Plotting
    a = np.arange(args.prune_iterations)
    plt.plot(a, bestacc, c="blue", label="Winning tickets") 
    plt.title(f"Test BLEU vs Unpruned Weights Percentage ({args.dataset},{args.arch_type})") 
    plt.xlabel("Unpruned Weights Percentage") 
    plt.ylabel("test BLEU") 
    plt.xticks(a, comp, rotation ="vertical") 
    plt.ylim(0,100)
    plt.legend() 
    plt.grid(color="gray") 
    utils.checkdir(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/")
    plt.savefig(f"{os.getcwd()}/plots/lt/{args.arch_type}/{args.dataset}/{args.prune_type}_AccuracyVsWeights.png", dpi=1200) 
    plt.close()                    
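All of these scripts initialize their networks with model.apply(weight_init), which the listing never reproduces. A common pattern, and a reasonable guess at what it does here, is Xavier-normal weights with zero biases for linear and convolutional layers; the version below is a hedged sketch, not the original function.

# Hedged sketch of a typical weight_init used with model.apply(weight_init).
import torch.nn as nn
import torch.nn.init as init

def weight_init(m):
    # Xavier-normal weights and zero biases for the common layer types;
    # other modules are left at their PyTorch defaults.
    if isinstance(m, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            init.zeros_(m.bias.data)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        init.ones_(m.weight.data)
        init.zeros_(m.bias.data)

Since model.apply visits every submodule exactly once, the isinstance checks decide which layers actually get re-initialized.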
Beispiel #25
0
        model = resnet.resnet18().to(device)
    elif args.arch_type == 'Conv2':
        model = SmallVGG.Conv2().to(device)
    elif args.arch_type == 'Conv4':
        model = SmallVGG.Conv4().to(device)
    elif args.arch_type == 'Conv6':
        model = SmallVGG.Conv6().to(device)
    elif args.arch_type == 'Conv8':
        model = SmallVGG.Conv8().to(device)
    else:
        raise Exception('\nWrong Model choice\n')

    # Weight Initialization
    model.apply(weight_init)
    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(f'{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/')
    torch.save(
        model,
        f'{os.getcwd()}/saves/{args.arch_type}/{args.dataset}/initial_state_dict.pth.tar'
    )

    # Making Initial Mask
    mask = []
    score = []
    for name, p in model.named_parameters():
        if 'weight' in name:
            tensor = p.data.cpu().numpy()
            mask.append(np.ones_like(tensor))
            if p.data.dim() > 1:
                score.append(init.xavier_normal_(torch.ones_like(p.data)))
            else:
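The listing above breaks off while building per-layer mask and score lists (an all-ones mask plus a random Xavier score for every multi-dimensional weight tensor). One common way such random scores are turned into a binary mask is to keep the top fraction of entries per layer; the helper below sketches that idea purely as an illustration, under the assumption of per-layer top-k selection, and may differ from whatever the truncated original does.

# Hedged sketch: turning random per-layer scores into a binary keep-mask.
import numpy as np
import torch
import torch.nn.init as init

def make_score_mask(model, keep_fraction=0.5):
    # Keep the top `keep_fraction` of entries per weight tensor according to a
    # random Xavier score; 1-D tensors (e.g. biases, norms) are kept entirely.
    masks = []
    for name, p in model.named_parameters():
        if 'weight' not in name:
            continue
        if p.data.dim() > 1:
            score = init.xavier_normal_(torch.ones_like(p.data)).abs()
            k = max(1, int(keep_fraction * score.numel()))
            threshold = torch.topk(score.view(-1), k, largest=True).values.min()
            masks.append((score >= threshold).float().cpu().numpy())
        else:
            masks.append(np.ones_like(p.data.cpu().numpy()))
    return masks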
Beispiel #26
0
def main(args, ITE=0):
    import pandas as pd
    pd.set_option('display.width', 400)
    pd.set_option('display.max_columns', 10)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    reinit = True if args.prune_type == "reinit" else False
    layerwise = True if args.prune_type == "layerwise" else False

    # Data Loader
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    transform_cifar10 = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    if args.dataset == "mnist":
        traindataset = datasets.MNIST('../data',
                                      train=True,
                                      download=True,
                                      transform=transform)
        testdataset = datasets.MNIST('../data',
                                     train=False,
                                     transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar10":
        traindataset = datasets.CIFAR10('../data',
                                        train=True,
                                        download=True,
                                        transform=transform_cifar10)
        testdataset = datasets.CIFAR10('../data',
                                       train=False,
                                       transform=transform_cifar10)
        from archs.cifar10 import AlexNet, LeNet5, fc1, vgg, resnet, densenet, minivgg

    elif args.dataset == "fashionmnist":
        traindataset = datasets.FashionMNIST('../data',
                                             train=True,
                                             download=True,
                                             transform=transform)
        testdataset = datasets.FashionMNIST('../data',
                                            train=False,
                                            transform=transform)
        from archs.mnist import AlexNet, LeNet5, fc1, vgg, resnet

    elif args.dataset == "cifar100":
        traindataset = datasets.CIFAR100('../data',
                                         train=True,
                                         download=True,
                                         transform=transform)
        testdataset = datasets.CIFAR100('../data',
                                        train=False,
                                        transform=transform)
        from archs.cifar100 import AlexNet, fc1, LeNet5, vgg, resnet

    # If you want to add an extra dataset, add it here

    else:
        print("\nWrong Dataset choice \n")
        exit()

    # obtain training indices that will be used for validation
    if args.early_stopping:
        print(' Splitting Validation sets ')
        trainset_size = int((1 - args.valid_size) * len(traindataset))
        valset_size = len(traindataset) - trainset_size
        trainset, valset = torch.utils.data.random_split(
            traindataset, [trainset_size, valset_size])

        train_loader = torch.utils.data.DataLoader(trainset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   drop_last=False)
        valid_loader = torch.utils.data.DataLoader(valset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   drop_last=False)

    else:
        print(' No validation split; training on the full training set ')
        train_loader = torch.utils.data.DataLoader(traindataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   drop_last=False)

    # train_loader = cycle(train_loader)
    test_loader = torch.utils.data.DataLoader(testdataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=0,
                                              drop_last=True)

    # Importing Network Architecture
    global model
    if args.arch_type == "fc1":
        model = fc1.fc1().to(device)
    elif args.arch_type == "lenet5":
        model = LeNet5.LeNet5().to(device)
    elif args.arch_type == "alexnet":
        model = AlexNet.AlexNet().to(device)
    elif args.arch_type == "vgg16":
        model = vgg.vgg16().to(device)
    elif args.arch_type == "resnet18":
        model = resnet.resnet18().to(device)
    elif args.arch_type == "densenet121":
        model = densenet.densenet121().to(device)
    # If you want to add an extra model, add it here
    elif args.arch_type == "conv2":
        model = minivgg.conv2().to(device)
    elif args.arch_type == "conv4":
        model = minivgg.conv4().to(device)
    elif args.arch_type == "conv6":
        model = minivgg.conv6().to(device)

    else:
        print("\nWrong Model choice\n")
        exit()

    # Weight Initialization. NOTE: re-initializing here has been observed to reduce test accuracy; this call is under review.
    model.apply(weight_init)

    # get a timestamp for the file paths
    import datetime
    now = datetime.datetime.now()
    now_ = now.strftime("%m%d%H%M_")  # %m, %d, %H and %M are already zero-padded

    # Copying and Saving Initial State
    print('  saving initial model... ')
    initial_state_dict = copy.deepcopy(model.state_dict())
    utils.checkdir(
        f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/")
    torch.save(
        model,
        f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth.tar"
    )
    print(
        "  initial model saved in ",
        f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/initial_state_dict_{args.prune_type}.pth.tar"
    )

    # Making Initial Mask
    make_mask(model)

    # Optimizer and Loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss()  # Default was F.nll_loss

    # Layer Looper
    for name, param in model.named_parameters():
        print(name, param.size())

    # Pruning
    # NOTE First Pruning Iteration is of No Compression
    bestacc = 0.0
    best_accuracy = 0
    ITERATION = args.prune_iterations
    comp = np.zeros(ITERATION, float)
    bestacc = np.zeros(ITERATION, float)
    step = 0
    all_loss = np.zeros(args.end_iter, float)
    all_vloss = np.zeros(args.end_iter, float)
    all_accuracy = np.zeros(args.end_iter, float)

    for _ite in range(args.start_iter, ITERATION):

        # Early stopping parameter for each pruning iteration
        early_stopping = EarlyStopping(
            patience=99,
            verbose=True)  # patience set high so early stopping effectively never triggers

        if not _ite == 0:
            prune_by_percentile(args.prune_percent,
                                args.fc_prune_percent,
                                resample=resample,
                                reinit=reinit,
                                layerwise=layerwise,
                                if_split=args.split_conv_and_fc)
            if reinit:
                model.apply(weight_init)
                #if args.arch_type == "fc1":
                #    model = fc1.fc1().to(device)
                #elif args.arch_type == "lenet5":
                #    model = LeNet5.LeNet5().to(device)
                #elif args.arch_type == "alexnet":
                #    model = AlexNet.AlexNet().to(device)
                #elif args.arch_type == "vgg16":
                #    model = vgg.vgg16().to(device)
                #elif args.arch_type == "resnet18":
                #    model = resnet.resnet18().to(device)
                #elif args.arch_type == "densenet121":
                #    model = densenet.densenet121().to(device)
                #else:
                #    print("\nWrong Model choice\n")
                #    exit()
                step = 0
                for name, param in model.named_parameters():
                    if 'weight' in name:
                        weight_dev = param.device
                        param.data = torch.from_numpy(
                            param.data.cpu().numpy() *
                            mask[step]).to(weight_dev)
                        step = step + 1
                step = 0
            else:
                original_initialization(mask, initial_state_dict)
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=args.lr,
                                         weight_decay=1e-4)

        time.sleep(0.25)
        print(f"\n--- Pruning Level [{ITE}:{_ite}/{ITERATION}]: ---")

        # Print the table of Nonzeros in each layer
        comp1 = utils.print_nonzeros(model)
        comp[_ite] = comp1

        # pbar = range(args.end_iter)
        pbar = tqdm(range(args.end_iter))

        stop_flag = False
        for iter_ in pbar:

            # Frequency for Testing
            if iter_ % args.valid_freq == 0:
                accuracy = test(model, test_loader, criterion)

                # Save Weights
                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    # We don't save model per test-acc, will use validation-acc!
                    # utils.checkdir(f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/")
                    # torch.save(model,f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth.tar")

            # Training
            loss = train(model, train_loader, optimizer, criterion)

            all_loss[iter_] = loss
            all_accuracy[iter_] = accuracy

            # Validating
            valid_loss, loss_v = validate(model, valid_loader, optimizer,
                                          criterion)
            all_vloss[iter_] = valid_loss  #loss_v

            # early stopping
            checkpoint_path = f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/"
            save_path = f"{os.getcwd()}/saves/{now_}{args.arch_type}/{args.dataset}/{_ite}_model_{args.prune_type}.pth.tar"
            # msg = early_stopping(valid_loss, model, checkpoint_path, save_path)
            early_stopping(valid_loss, model, checkpoint_path, save_path)

            # Frequency for Printing Accuracy and Loss
            if iter_ % args.print_freq == 0:
                time.sleep(0.25)
                pbar.set_description(
                    # f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}% \t' + msg)
                    f'Train Epoch: {iter_}/{args.end_iter} Loss: {loss:.6f} V-Loss: {valid_loss:.6f} Accuracy: {accuracy:.2f}% Best Accuracy: {best_accuracy:.2f}%'
                )
                if iter_ % 5 == 4:
                    print('')

            if early_stopping.early_stop and not stop_flag:
                print("Early stopping")
                stop_flag = True
                # break

        writer.add_scalar('Accuracy/test', best_accuracy, comp1)
        bestacc[_ite] = best_accuracy

        # Plotting Loss (Training), Accuracy (Testing), Iteration Curve
        #NOTE Loss is computed for every iteration while Accuracy is computed only for every {args.valid_freq} iterations. Therefore Accuracy saved is constant during the uncomputed iterations.
        #NOTE Normalized the accuracy to [0,100] for ease of plotting.
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 100 * (all_loss - np.min(all_loss)) /
                 np.ptp(all_loss).astype(float),
                 c="blue",
                 label="Train loss")
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 100 * (all_vloss - np.min(all_vloss)) /
                 np.ptp(all_vloss).astype(float),
                 c="green",
                 label="Valid loss")
        plt.plot(np.arange(1, (args.end_iter) + 1),
                 all_accuracy,
                 c="red",
                 label="Accuracy")
        plt.title(
            f"Loss Vs Accuracy Vs Iterations ({args.dataset},{now_}{args.arch_type})"
        )
        plt.xlabel("Iterations")
        plt.ylabel("Loss and Accuracy")
        plt.legend()
        plt.grid(color="gray")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{now_}{args.arch_type}/{args.dataset}/")
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_LossVsAccuracy_{comp1}.png",
            dpi=300)
        plt.close()

        # Dump Plot values
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/")
        all_loss.dump(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_all_loss_{comp1}.dat"
        )
        all_accuracy.dump(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_all_accuracy_{comp1}.dat"
        )

        # Dumping mask
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/")
        with open(
                f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_mask_{comp1}.pkl",
                'wb') as fp:
            pickle.dump(mask, fp)

        # Reset accumulators for the next pruning level
        best_accuracy = 0
        all_loss = np.zeros(args.end_iter, float)
        all_accuracy = np.zeros(args.end_iter, float)

        # Dumping Values for Plotting
        utils.checkdir(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/")
        comp.dump(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_compression.dat"
        )
        bestacc.dump(
            f"{os.getcwd()}/dumps/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_bestaccuracy.dat"
        )

        # Plotting
        a = np.arange(args.prune_iterations)
        plt.plot(a, bestacc, c="blue", label="Winning tickets")
        plt.title(
            f"Test Accuracy vs Unpruned Weights Percentage ({args.dataset},{now_}{args.arch_type})"
        )
        plt.xlabel("Unpruned Weights Percentage")
        plt.ylabel("test accuracy")
        plt.xticks(a, comp, rotation="vertical")
        plt.ylim(0, 100)
        plt.legend()
        plt.grid(color="gray")
        utils.checkdir(
            f"{os.getcwd()}/plots/lt/{now_}{args.arch_type}/{args.dataset}/")
        plt.savefig(
            f"{os.getcwd()}/plots/lt/{now_}{args.arch_type}/{args.dataset}/{args.prune_type}_AccuracyVsWeights.png",
            dpi=300)
        plt.close()

    print('Training ended~~~')
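Beispiel #26 drives checkpointing through an EarlyStopping object that is called as early_stopping(valid_loss, model, checkpoint_path, save_path) and queried through its early_stop attribute. The class below is a minimal sketch compatible with that interface (save on improvement, count stale epochs, flip early_stop after patience misses); it is an assumption about the helper, not its actual source.

# Hedged sketch of an EarlyStopping helper matching the calls in Beispiel #26.
import os
import numpy as np
import torch

class EarlyStopping:
    def __init__(self, patience=7, verbose=False, delta=0.0):
        self.patience = patience   # epochs to wait after the last improvement
        self.verbose = verbose
        self.delta = delta         # minimum change that counts as an improvement
        self.counter = 0
        self.best_loss = np.inf
        self.early_stop = False

    def __call__(self, val_loss, model, checkpoint_path, save_path):
        if val_loss < self.best_loss - self.delta:
            # Validation loss improved: checkpoint the model and reset the counter.
            if self.verbose:
                print(f'Validation loss improved ({self.best_loss:.6f} -> {val_loss:.6f}); saving model.')
            self.best_loss = val_loss
            os.makedirs(checkpoint_path, exist_ok=True)
            torch.save(model, save_path)
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True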