Example #1
    def connect(self):
        self.ip = str(
            self.ipInput.text()) if self.ipInput.text() else '127.0.0.1'
        self.port = int(self.portInput.text()) if self.portInput.text() else 21
        self.username = str(
            self.userInput.text()) if self.userInput.text() else 'anonymous'
        self.password = str(self.passInput.text())

        self.q_info.put(
            utils.colorful(
                'Connecting to ' + self.ip + ':' + str(self.port) + '...',
                'black'))

        self.q_cmd.put('connect')
        self.q_cmd.put(self.ip)
        self.q_cmd.put(self.port)

        if not self.parentPipe.recv() == 'connected':
            self.q_info.put(utils.colorful('connect error.', 'red'))
            return

        self.connected = True

        self.q_info.put(utils.colorful('Log in as ' + self.username, 'black'))

        self.q_cmd.put('login')
        self.q_cmd.put(self.username)
        self.q_cmd.put(self.password)
        if not self.parentPipe.recv() == 'ok':
            return
        self.q_cmd.put('list')
Example #2
def get_instance_dataloader(args):
    minimum_crop = 0.2
    logging.info(colorful('ResizedCrop from {} to 1'.format(minimum_crop)))
    if args.blur:
        train_transforms = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(minimum_crop, 1.)),
            transforms.RandomApply(
                [
                    transforms.ColorJitter(0.4, 0.4, 0.4,
                                           0.1)  # not strengthened
                ],
                p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
    else:
        train_transforms = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(minimum_crop, 1.)),
            transforms.RandomGrayscale(p=0.2),
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
    val_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    train_dataset = ImageNetInstance(train=True,
                                     image_transforms=train_transforms,
                                     imagenet_dir=IMAGENET_DIR if args.dataset
                                     == 'imagenet' else HUN_IMAGENET_DIR)
    val_dataset = ImageNetInstance(train=False,
                                   image_transforms=val_transforms,
                                   imagenet_dir=IMAGENET_DIR if args.dataset
                                   == 'imagenet' else HUN_IMAGENET_DIR)
    train_samples = train_dataset.dataset.samples
    train_labels = [train_samples[i][1] for i in range(len(train_samples))]
    train_ordered_labels = np.array(train_labels)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=args.n_workers)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            pin_memory=True,
                            num_workers=args.n_workers)

    return train_loader, val_loader, train_ordered_labels, train_dataset, val_dataset
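
A standalone sketch of the validation pipeline defined above (only PIL and torchvision are assumed to be installed): any RGB image comes out as a normalized 3x224x224 tensor.

from PIL import Image
from torchvision import transforms

# Same val_transforms as in get_instance_dataloader above.
val_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

img = Image.new('RGB', (500, 375))  # placeholder image instead of an ImageNet file
print(val_transforms(img).shape)    # torch.Size([3, 224, 224])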
Example #3
 def disconnect(self):
     if not self.connected:
         return
     self.q_info.put(
         utils.colorful(
             'Disconnected from ' + self.ip + ':' + str(self.port), 'black'))
     self.q_cmd.put('quit')
     self.connected = False
Example #4
    def __init__(self,
                 block,
                 num_blocks,
                 low_dim=128,
                 dropout=False,
                 non_linear_head=False,
                 mlpbn=False,
                 bnaffine=True):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.dropout = dropout

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # self.linear = nn.Linear(512*block.expansion, 128)
        if not non_linear_head:
            self.linear = nn.Linear(512 * block.expansion, low_dim)
        else:
            logging.info(colorful('Using Non Linear Head'))
            if mlpbn:
                logging.info('Using BatchNorm in MLP head projection')
                self.linear = nn.Sequential(
                    nn.Linear(512 * block.expansion, 512 * block.expansion),
                    nn.BatchNorm1d(512 * block.expansion, affine=bnaffine),
                    nn.ReLU(inplace=True),
                    nn.Linear(512 * block.expansion, low_dim),
                )
            else:
                logging.info('Not Using BatchNorm in MLP head projection')
                self.linear = nn.Sequential(
                    nn.Linear(512 * block.expansion, 512 * block.expansion),
                    nn.ReLU(inplace=True),
                    nn.Linear(512 * block.expansion, low_dim),
                )

        if self.dropout:
            self.dropout_layer = nn.Dropout(p=0.5)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #5
 def login(self, username='******', password=''):
     if self.q_info:
         self.q_info.put(utils.colorful('USER ' + username, 'purple'))
     self.sock.send(('USER ' + username + '\r\n').encode())
     res = self.__recv()
     if res.startswith('331'):
         if self.q_info:
             self.q_info.put(utils.colorful('PASS ' + password, 'purple'))
         self.sock.send(('PASS ' + password + '\r\n').encode())
         res = self.__recv()
         if self.pipe and res.startswith('230'):
             self.pipe.send('ok')
         elif self.pipe:
             self.pipe.send('error')
     else:
         if self.pipe:
             self.pipe.send('error')
     return res
Example #6
    def __send_port(self):
        self.ip = '127.0.0.1'
        self.port = random.randint(20000, 65535)  # randint is inclusive; 65536 is not a valid TCP port
        portstring = ','.join(self.ip.split('.')) + ',' + str(self.port // 256) + ',' + str(self.port % 256)

        if self.q_info:
            self.q_info.put(utils.colorful('PORT ' + portstring, 'purple'))
        self.sock.send(('PORT ' + portstring + '\r\n').encode())
        res = self.__recv()
        return res
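
For reference, a standalone sketch of the PORT argument encoding used above: the port is split into a high byte and a low byte and appended to the comma-separated IP.

ip, port = '127.0.0.1', 50000  # made-up values
portstring = ','.join(ip.split('.')) + ',' + str(port // 256) + ',' + str(port % 256)
print(portstring)  # 127,0,0,1,195,80  (195 * 256 + 80 == 50000)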
Example #7
 def __send_pasv(self):
     if self.q_info:
         self.q_info.put(utils.colorful('PASV', 'purple'))
     self.sock.send(('PASV' + '\r\n').encode())
     res = self.__recv()
     if res.startswith('227 '):
         raw = re.search('([0-9]{1,3},){5}[0-9]{1,3}', res)
         raw = raw.group(0)
         numl = raw.split(',')
         self.ip = '.'.join(numl[:4])
         self.port = int(numl[4]) * 256 + int(numl[5])
     return res
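
And the inverse direction, a standalone sketch of how the 227 reply to PASV is parsed back into an address and port with the same regular expression (the reply text here is made up).

import re

reply = '227 Entering Passive Mode (192,168,1,10,195,80).'  # example reply, not from a real server
numl = re.search('([0-9]{1,3},){5}[0-9]{1,3}', reply).group(0).split(',')
print('.'.join(numl[:4]), int(numl[4]) * 256 + int(numl[5]))  # 192.168.1.10 50000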
Example #8
    def __init__(self,
                 block,
                 num_blocks,
                 low_dim=128,
                 dropout=False,
                 non_linear_head=False,
                 mlpbn=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.dropout = dropout

        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # self.linear = nn.Linear(512*block.expansion, 128)
        if not non_linear_head:
            self.linear = nn.Linear(512 * block.expansion, low_dim)
        else:
            logging.info(colorful('Using Non Linear Head'))
            if mlpbn:
                self.linear = nn.Sequential(
                    nn.Linear(512 * block.expansion, 512 * block.expansion),
                    nn.BatchNorm1d(512 * block.expansion),
                    nn.ReLU(inplace=True),
                    nn.Linear(512 * block.expansion, low_dim),
                )
            else:
                self.linear = nn.Sequential(
                    nn.Linear(512 * block.expansion, 512 * block.expansion),
                    nn.ReLU(inplace=True),
                    nn.Linear(512 * block.expansion, low_dim),
                )

        if self.dropout:
            self.dropout_layer = nn.Dropout(p=0.5)
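
A standalone sketch of the optional MLP projection head built above, assuming a BasicBlock backbone (block.expansion == 1, so 512 input features); only torch is required.

import torch
import torch.nn as nn

feat_dim, low_dim = 512, 128  # 512 * block.expansion with expansion == 1
head = nn.Sequential(
    nn.Linear(feat_dim, feat_dim),
    nn.BatchNorm1d(feat_dim),
    nn.ReLU(inplace=True),
    nn.Linear(feat_dim, low_dim),
)
x = torch.randn(4, feat_dim)  # a batch of 4 pooled backbone features
print(head(x).shape)          # torch.Size([4, 128])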
Example #9
 def __send_stor(self, filename, currentDir, currentNumber):
     if self.q_info:
         self.q_info.put(utils.colorful('STOR ' + filename, 'purple'))
     self.sock.send(('STOR ' + filename + '\r\n').encode())
     sockf = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     if self.pasv:
         sockf.connect((self.ip, self.port))
         res = self.__recv()
         if not res.startswith('150'):
             return
         th = threading.Thread(target=self.__storfile, args=(currentDir, filename, currentNumber, sockf))
         th.start()
     else:
         sockf.bind(('0.0.0.0', self.port))
         sockf.listen(1)
         res = self.__recv()
         if not res.startswith('150'):
             return
         conn, addr = sockf.accept()
         th = threading.Thread(target=self.__storfile, args=(currentDir, filename, currentNumber, conn, sockf))
         th.start()
Example #10
    def __send_list(self):
        if self.q_info:
            self.q_info.put(utils.colorful('LIST', 'purple'))
        self.sock.send(('LIST' + '\r\n').encode())
        sockf = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.pasv:
            sockf.connect((self.ip, self.port))
            res = self.__recv()
            if not res.startswith('150'):
                return
            dirinfo = ''
            while True:
                res = sockf.recv(self.size).decode()
                if not res:
                    break
                dirinfo = dirinfo + res
        else:
            sockf.bind(('0.0.0.0', self.port))
            sockf.listen(1)
            res = self.__recv()
            if not res.startswith('150'):
                return
            conn, addr = sockf.accept()
            dirinfo = ''
            with conn:
                while True:
                    res = conn.recv(self.size).decode()
                    if not res:
                        break
                    dirinfo = dirinfo + res
        if self.q_dir2:
            self.q_dir2.put(dirinfo)

        res = self.__recv()
        sockf.close()
        return res
Example #11
def get_semi_dataloader(args):
    minimum_crop = 0.08
    logging.info(colorful('ResizedCrop from {} to 1'.format(minimum_crop)))
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(minimum_crop, 1.)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    val_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    train_dataset = listData.ListData(os.path.join(IMAGENET_DIR, 'train'),
                                      args.list,
                                      transforms=train_transforms)
    val_dataset = datasets.ImageFolder(os.path.join(IMAGENET_DIR, 'val'),
                                       val_transforms)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=args.n_workers)
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            pin_memory=True,
                            num_workers=args.n_workers)

    return train_loader, val_loader
Example #12
    # Define the inference output directory
    output_dir = "test_output_20190221_2"
    inference_out_path = os.path.join(inference_output_path_base,output_dir)
    if not os.path.exists(inference_out_path):
        os.makedirs(inference_out_path)

    for i in range(len(names)):
        name = names[i]

        image_path = os.path.join(img_path_test, name)
        image_bgr = cv2.imread(image_path,1)
        image_bgr = cv2.resize(image_bgr,(img_cols,img_rows),interpolation=cv2.INTER_CUBIC)
        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
     
        x_test = np.empty((1, img_rows, img_cols, 3), dtype=np.float32)
        x_test[0, :, :, 0:3] = image_bgr/255.0

        out = pspnet50_model.predict(x_test)
        out = np.reshape(out, (img_rows, img_cols, num_classes))
        out = np.argmax(out, axis=2)

        result_rgb = colorful(out)

        # Show the original image, the prediction, and the prediction overlaid on the original, then save the comparison
        vis_seg_save_path = os.path.join(inference_out_path,name.split('.')[0]+"_predict_compare.png")
        vis_segmentation(image_rgb,result_rgb,vis_seg_save_path)
        print("generating: {}".format(vis_seg_save_path))

    K.clear_session()
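
The colorful(out) call above turns the per-pixel argmax map into an RGB image. A hypothetical sketch of that kind of palette lookup (the palette used by the example is not shown, so these colours are made up):

import numpy as np

palette = np.array([[0, 0, 0],     # class 0 -> black (assumed colours)
                    [128, 0, 0],   # class 1 -> dark red
                    [0, 128, 0]],  # class 2 -> dark green
                   dtype=np.uint8)
label_map = np.array([[0, 1],
                      [2, 1]])     # toy 2x2 argmax output
result_rgb = palette[label_map]    # fancy indexing: one RGB triple per pixel
print(result_rgb.shape)            # (2, 2, 3)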

Example #13
    def __init__(self,
                 block,
                 layers,
                 low_dim=128,
                 in_channel=3,
                 width=1,
                 non_linear_head=False,
                 mlpbn=False):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channel,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)

        self.base = int(64 * width)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, self.base, layers[0])
        self.layer2 = self._make_layer(block,
                                       self.base * 2,
                                       layers[1],
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       self.base * 4,
                                       layers[2],
                                       stride=2)
        self.layer4 = self._make_layer(block,
                                       self.base * 8,
                                       layers[3],
                                       stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        print(non_linear_head)
        if not non_linear_head:
            self.fc = nn.Linear(self.base * 8 * block.expansion, low_dim)
        else:
            logging.info(colorful('Using Non Linear Head'))
            if mlpbn:
                self.fc = nn.Sequential(
                    nn.Linear(self.base * 8 * block.expansion,
                              self.base * 8 * block.expansion),
                    nn.BatchNorm1d(self.base * 8 * block.expansion),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.base * 8 * block.expansion, low_dim),
                )
            else:
                self.fc = nn.Sequential(
                    nn.Linear(self.base * 8 * block.expansion,
                              self.base * 8 * block.expansion),
                    nn.ReLU(inplace=True),
                    nn.Linear(self.base * 8 * block.expansion, low_dim),
                )

        for m in self.modules():
            #if isinstance(m, nn.Conv2d):
            #n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            #m.weight.data.normal_(0, math.sqrt(2. / n))
            #elif isinstance(m, nn.BatchNorm2d):
            #m.weight.data.fill_(1)
            #m.bias.data.zero_()
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example #14
File: translate.py  Project: lakca/scripts
        if 19968 <= ord(char) <= 40869:
            return 'en'
    return 'zh'


headers = [
    ['google', '谷歌'],
    ['youdao', '有道'],
    ['baidu', '百度'],
    ['deepl', 'deepl'],
    ['tencent', '腾讯'],
    ['alibaba', '阿里巴巴'],
    ['sogou', '搜狗'],
]

word = ' '.join(argv[1:]).strip()
lang = to_language(word)
width = max([wcswidth(e[1]) for e in headers])

print()
print(colorful(word, 'magenta'))
print()

for [k, col] in headers:
    # Bind k per iteration; a plain closure would read k only when the thread runs.
    Thread(target=lambda col, k=k: print(
        colorful(col.ljust(width - wcswidth(col) + len(col)), 'green') + ': ' +
        colorful(
            silence(lambda: getattr(translators, k)
                    (word, to_language=lang), ''), 'red')),
           args=(col, )).start()
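
The padding expression in the loop above lines up labels by display width rather than character count, which matters for double-width CJK text. A standalone sketch, assuming the wcwidth package:

from wcwidth import wcswidth

labels = ['google', '谷歌']
width = max(wcswidth(s) for s in labels)
for s in labels:
    # str.ljust counts characters, so widen the target by (len - display width).
    print(s.ljust(width - wcswidth(s) + len(s)) + '|')
# Both lines end with '|' in the same column.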
Example #15
 def __send_quit(self):
     if self.q_info:
         self.q_info.put(utils.colorful('QUIT', 'purple'))
     self.sock.send(('QUIT' + '\r\n').encode())
     res = self.__recv()
     return res
Example #16
 def __send_rnto(self, name_new):
     if self.q_info:
         self.q_info.put(utils.colorful('RNTO ' + name_new, 'purple'))
     self.sock.send(('RNTO ' + name_new + '\r\n').encode())
     res = self.__recv()
     return res
Example #17
 def __send_rnfr(self, name_old):
     if self.q_info:
         self.q_info.put(utils.colorful('RNFR ' + name_old, 'purple'))
     self.sock.send(('RNFR ' + name_old + '\r\n').encode())
     res = self.__recv()
     return res
Example #18
 def __send_type(self, t='I'):
     if self.q_info:
         self.q_info.put(utils.colorful('TYPE ' + t, 'purple'))
     self.sock.send(('TYPE ' + t + '\r\n').encode())
     res = self.__recv()
     return res
Example #19
File: doc.py  Project: lakca/scripts
                    'summary': e.get('summary', ''),
                })

        return (rows, cfg)


if __name__ == '__main__':
    from sys import argv
    type = None
    query = None
    opts = {}
    opts['filters'] = []
    if '-h' in argv:
        print('打开文档:doc.py :{type} [::{opt}...] [@{filter}...]')
        print(f'    {colorful("type", "red")}: ')
        for type in Query.get_types():
            print('          ' + colorful(type, 'green'))
        print('    Example: doc.py :mdn ::zh @http headers')
        exit(0)
    for arg in argv[1:]:
        if arg.startswith('::'):
            opts[arg[2:]] = True
        elif arg.startswith(':'):
            type = arg[1:]
        elif arg.startswith('@'):
            opts['filters'].append(arg[1:])
        else:
            query = arg
    q = Query()
    q.query(type, query, opts)
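
A quick illustration of the small command-line grammar parsed above, on a made-up argv: ':' selects the type, '::' sets a boolean option, '@' adds a filter, and a bare word becomes the query.

argv = ['doc.py', ':mdn', '::zh', '@http', 'headers']  # made-up invocation

type_, query, opts = None, None, {'filters': []}
for arg in argv[1:]:
    if arg.startswith('::'):
        opts[arg[2:]] = True
    elif arg.startswith(':'):
        type_ = arg[1:]
    elif arg.startswith('@'):
        opts['filters'].append(arg[1:])
    else:
        query = arg
print(type_, query, opts)  # mdn headers {'filters': ['http'], 'zh': True}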
Example #20
def main():

    global best_acc1
    best_acc1 = 0

    args = parse_option()

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # set the data loader
    train_folder = os.path.join(args.data_folder, 'train')
    val_folder = os.path.join(args.data_folder, 'val')

    logger = getLogger(args.save_folder)
    if args.dataset.startswith('imagenet') or args.dataset.startswith(
            'places'):
        image_size = 224
        crop_padding = 32
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        normalize = transforms.Normalize(mean=mean, std=std)
        if args.aug == 'NULL':
            train_transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size,
                                             scale=(args.crop, 1.)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        elif args.aug == 'CJ':
            train_transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size,
                                             scale=(args.crop, 1.)),
                transforms.RandomGrayscale(p=0.2),
                transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            raise NotImplementedError('augmentation not supported: {}'.format(
                args.aug))

        val_transform = transforms.Compose([
            transforms.Resize(image_size + crop_padding),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalize,
        ])
        if args.dataset.startswith('imagenet'):
            train_dataset = datasets.ImageFolder(train_folder, train_transform)
            val_dataset = datasets.ImageFolder(
                val_folder,
                val_transform,
            )

        if args.dataset.startswith('places'):
            train_dataset = ImageList(
                '/data/trainvalsplit_places205/train_places205.csv',
                '/data/data/vision/torralba/deeplearning/images256',
                transform=train_transform,
                symbol_split=' ')
            val_dataset = ImageList(
                '/data/trainvalsplit_places205/val_places205.csv',
                '/data/data/vision/torralba/deeplearning/images256',
                transform=val_transform,
                symbol_split=' ')

        print(len(train_dataset))
        train_sampler = None

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=(train_sampler is None),
            num_workers=args.n_workers,
            pin_memory=False,
            sampler=train_sampler)

        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.n_workers,
                                                 pin_memory=False)
    elif args.dataset.startswith('cifar'):
        train_loader, val_loader = cifar.get_linear_dataloader(args)
    elif args.dataset.startswith('svhn'):
        train_loader, val_loader = svhn.get_linear_dataloader(args)

    # create model and optimizer
    if args.model == 'alexnet':
        if args.layer == 6:
            args.layer = 5
        model = AlexNet(128)
        model = nn.DataParallel(model)
        classifier = LinearClassifierAlexNet(args.layer, args.n_label, 'avg')
    elif args.model == 'alexnet_cifar':
        if args.layer == 6:
            args.layer = 5
        model = AlexNet_cifar(128)
        model = nn.DataParallel(model)
        classifier = LinearClassifierAlexNet(args.layer,
                                             args.n_label,
                                             'avg',
                                             cifar=True)
    elif args.model == 'resnet50':
        model = resnet50(non_linear_head=False)
        model = nn.DataParallel(model)
        classifier = LinearClassifierResNet(args.layer, args.n_label, 'avg', 1)
    elif args.model == 'resnet18':
        model = resnet18()
        model = nn.DataParallel(model)
        classifier = LinearClassifierResNet(args.layer,
                                            args.n_label,
                                            'avg',
                                            1,
                                            bottleneck=False)
    elif args.model == 'resnet18_cifar':
        model = resnet18_cifar()
        model = nn.DataParallel(model)
        classifier = LinearClassifierResNet(args.layer,
                                            args.n_label,
                                            'avg',
                                            1,
                                            bottleneck=False)
    elif args.model == 'resnet50_cifar':
        model = resnet50_cifar()
        model = nn.DataParallel(model)
        classifier = LinearClassifierResNet(args.layer, args.n_label, 'avg', 1)
    elif args.model == 'resnet50x2':
        model = InsResNet50(width=2)
        classifier = LinearClassifierResNet(args.layer, args.n_label, 'avg', 2)
    elif args.model == 'resnet50x4':
        model = InsResNet50(width=4)
        classifier = LinearClassifierResNet(args.layer, args.n_label, 'avg', 4)
    elif args.model == 'shufflenet':
        model = shufflenet_v2_x1_0(num_classes=128, non_linear_head=False)
        model = nn.DataParallel(model)
        classifier = LinearClassifierResNet(args.layer, args.n_label, 'avg',
                                            0.5)
    else:
        raise NotImplementedError('model not supported {}'.format(args.model))

    print('==> loading pre-trained model')
    ckpt = torch.load(args.model_path)
    if not args.moco:
        model.load_state_dict(ckpt['state_dict'])
    else:
        try:
            state_dict = ckpt['state_dict']
            for k in list(state_dict.keys()):
                # retain only encoder_q up to before the embedding layer
                if k.startswith('module.encoder_q'
                                ) and not k.startswith('module.encoder_q.fc'):
                    # remove prefix
                    state_dict['module.' +
                               k[len("module.encoder_q."):]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
            model.load_state_dict(state_dict)
        except:
            pass
    print("==> loaded checkpoint '{}' (epoch {})".format(
        args.model_path, ckpt['epoch']))
    print('==> done')

    model = model.cuda()
    classifier = classifier.cuda()

    criterion = torch.nn.CrossEntropyLoss().cuda(args.gpu)

    if not args.adam:
        optimizer = torch.optim.SGD(classifier.parameters(),
                                    lr=args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.Adam(classifier.parameters(),
                                     lr=args.learning_rate,
                                     betas=(args.beta1, args.beta2),
                                     weight_decay=args.weight_decay,
                                     eps=1e-8)

    model.eval()
    cudnn.benchmark = True

    # optionally resume from a checkpoint
    args.start_epoch = 1
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            # checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch'] + 1
            classifier.load_state_dict(checkpoint['classifier'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            best_acc1 = checkpoint['best_acc1']
            print(best_acc1.item())
            best_acc1 = best_acc1.cuda()
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
            if 'opt' in checkpoint.keys():
                # resume optimization hyper-parameters
                print('=> resume hyper parameters')
                if 'bn' in vars(checkpoint['opt']):
                    print('using bn: ', checkpoint['opt'].bn)
                if 'adam' in vars(checkpoint['opt']):
                    print('using adam: ', checkpoint['opt'].adam)
                #args.learning_rate = checkpoint['opt'].learning_rate
                # args.lr_decay_epochs = checkpoint['opt'].lr_decay_epochs
                args.lr_decay_rate = checkpoint['opt'].lr_decay_rate
                args.momentum = checkpoint['opt'].momentum
                args.weight_decay = checkpoint['opt'].weight_decay
                args.beta1 = checkpoint['opt'].beta1
                args.beta2 = checkpoint['opt'].beta2
            del checkpoint
            torch.cuda.empty_cache()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # tensorboard
    tblogger = tb_logger.Logger(logdir=args.tb_folder, flush_secs=2)

    # routine
    best_acc = 0.0
    for epoch in range(args.start_epoch, args.epochs + 1):

        adjust_learning_rate(epoch, args, optimizer)
        print("==> training...")

        time1 = time.time()
        train_acc, train_acc5, train_loss = train(epoch, train_loader, model,
                                                  classifier, criterion,
                                                  optimizer, args)
        time2 = time.time()
        logging.info('train epoch {}, total time {:.2f}'.format(
            epoch, time2 - time1))

        logging.info(
            'Epoch: {}, lr:{} , train_loss: {:.4f}, train_acc: {:.4f}/{:.4f}'.
            format(epoch, optimizer.param_groups[0]['lr'], train_loss,
                   train_acc, train_acc5))

        tblogger.log_value('train_acc', train_acc, epoch)
        tblogger.log_value('train_acc5', train_acc5, epoch)
        tblogger.log_value('train_loss', train_loss, epoch)
        tblogger.log_value('learning_rate', optimizer.param_groups[0]['lr'],
                           epoch)

        test_acc, test_acc5, test_loss = validate(val_loader, model,
                                                  classifier, criterion, args)

        if test_acc >= best_acc:
            best_acc = test_acc

        logging.info(
            colorful(
                'Epoch: {}, val_loss: {:.4f}, val_acc: {:.4f}/{:.4f}, best_acc: {:.4f}'
                .format(epoch, test_loss, test_acc, test_acc5, best_acc)))
        tblogger.log_value('test_acc', test_acc, epoch)
        tblogger.log_value('test_acc5', test_acc5, epoch)
        tblogger.log_value('test_loss', test_loss, epoch)

        # save the best model
        if test_acc > best_acc1:
            best_acc1 = test_acc
            state = {
                'opt': args,
                'epoch': epoch,
                'classifier': classifier.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }
            save_name = '{}_layer{}.pth'.format(args.model, args.layer)
            save_name = os.path.join(args.save_folder, save_name)
            print('saving best model!')
            torch.save(state, save_name)

        # save model
        if epoch % args.save_freq == 0:
            print('==> Saving...')
            state = {
                'opt': args,
                'epoch': epoch,
                'classifier': classifier.state_dict(),
                'best_acc1': test_acc,
                'optimizer': optimizer.state_dict(),
            }
            save_name = 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch)
            save_name = os.path.join(args.save_folder, save_name)
            print('saving regular model!')
            torch.save(state, save_name)

        # tensorboard logger
        pass
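
The MoCo branch near the top of main() keeps only the query-encoder weights and strips the 'encoder_q' prefix before loading. A standalone sketch of that key-renaming loop on a toy state_dict:

state_dict = {
    'module.encoder_q.conv1.weight': 'kept',
    'module.encoder_q.fc.0.weight': 'dropped (projection head)',
    'module.encoder_k.conv1.weight': 'dropped (key encoder)',
}
for k in list(state_dict.keys()):
    if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
        state_dict['module.' + k[len('module.encoder_q.'):]] = state_dict[k]
    del state_dict[k]
print(state_dict)  # {'module.conv1.weight': 'kept'}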
Example #21
 def __send_rmd(self, dirname):
     if self.q_info:
         self.q_info.put(utils.colorful('RMD ' + dirname, 'purple'))
     self.sock.send(('RMD ' + dirname + '\r\n').encode())
     res = self.__recv()
     return res