Example #1
File: api.py Project: uri247/arch
 def post(self, firmid, clsfid):
     # Build the child key under the parent Firm; refuse to overwrite an
     # existing Classification with 409 Conflict.
     clsf_key = ndb.Key('Firm', firmid, 'Classification', clsfid)
     clsf = clsf_key.get()
     if clsf:
         self.error(409)
     else:
         d = json.loads(self.request.body)
         clsf = Classification(key=clsf_key, **d)
         clsf.put()
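These are ordinary App Engine webapp2 request handlers; a minimal sketch of how the route could be wired up, assuming a handler class named ClassificationHandler (the class name is not shown in the excerpt):

import webapp2

# Hypothetical routing: firmid and clsfid are bound from the URL template
# and passed to post() / get() as positional arguments.
app = webapp2.WSGIApplication([
    webapp2.Route(r'/api/firms/<firmid>/classifications/<clsfid>',
                  handler='api.ClassificationHandler'),
])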
Example #2
File: api.py Project: uri247/arch
 def get(self, firmid):
     self.json_content()
     firm_key = ndb.Key('Firm', firmid)
     # Ancestor query: list every Classification stored under this Firm.
     query = Classification.query(ancestor=firm_key)
     classifications = query.map(
         lambda classification: classification.to_dict())
     self.w(json.dumps(classifications))
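The json_content() and w() helpers come from an unshown base handler; a minimal sketch of what they plausibly do (hypothetical definitions, names kept from the example):

# Hypothetical base-handler helpers assumed by the excerpt.
def json_content(self):
    self.response.headers['Content-Type'] = 'application/json'

def w(self, text):
    self.response.write(text)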
Example #3
def training():
    # inputs_dict, target_dict, pairs, hidden_size, n_iters and device are
    # presumably module-level globals; they are not shown in this excerpt.
    context_x = Context(inputs_dict.n_words, hidden_size).to(device)
    classification_x = Classification().to(device)

    context_x, classification_x, plot_losses = trainIters(
        context_x, classification_x, device, inputs_dict, target_dict,
        pairs, n_iters, print_every=50)

    return context_x, classification_x, plot_losses
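A minimal sketch of the device selection this function assumes; the other globals it reads would likewise be defined at module level (not shown):

import torch

# Standard device pick; the snippet assumes `device` already exists.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Typical call site (hypothetical):
# context_x, classification_x, plot_losses = training()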
Example #4
 def preview(self):
     ''' Ajax requests, whether synchronous or asynchronous, could not
     redirect correctly here, so the preview is submitted via form.submit()
     instead of:
     article = request.get_json(force=True)
     return self.render('article.html', article=article, mode='preview')
     '''
     classification = Classification.objects(
         id=request.form.get('classification')).first()
     # TODO: exception handling
     article = {
         'title': request.form.get('title'),
         'content': request.form.get('content'),
         'abstract': request.form.get('abstract'),
         'classification': classification,
     }
     return self.render('article.html', article=article, mode='preview')
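One way the TODO'd exception handling could look, assuming Flask's abort and MongoEngine's ValidationError are available (a sketch, not the project's actual fix):

from flask import abort, request
from mongoengine.errors import ValidationError

# Hypothetical fix for the TODO: a malformed ObjectId raises
# ValidationError, and a valid-but-unknown id returns None.
try:
    classification = Classification.objects(
        id=request.form.get('classification')).first()
except ValidationError:
    abort(400, 'malformed classification id')
if classification is None:
    abort(404, 'unknown classification id')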
Example #5
def main():
    # Define the dataloaders.
    train_loader = DataLoader(SkeletonFeeder(mode='train', debug=False),
                              batch_size=params['batchsize'], shuffle=True,
                              num_workers=params['numworkers'])
    val_loader = DataLoader(SkeletonFeeder(mode='valid', debug=False),
                            batch_size=params['batchsize'], shuffle=False,
                            num_workers=params['numworkers'])
    n_class = params['n_class']
    cur_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))

    # Build the model and move it onto the configured GPUs.
    model = Classification(n_class=n_class)
    model = model.cuda(params['gpu'][0])
    model = nn.DataParallel(model, device_ids=params['gpu'])

    if params['retrain']:
        # Warm-start from a checkpoint, keeping only the keys that match
        # the current architecture.
        trained_dict = torch.load(params['retrain'], map_location='cpu')
        model_dict = model.state_dict()
        trained_dict = {k: v for k, v in trained_dict.items()
                        if k in model_dict}
        model_dict.update(trained_dict)
        model.load_state_dict(model_dict)
        print('loaded pretrained weights')

    model_params = filter(lambda p: p.requires_grad, model.parameters())

    optimizer = optim.Adam(model_params, lr=params['lr'],
                           weight_decay=params['weight_decay'])
    schedule = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                    factor=0.333, patience=2,
                                                    verbose=True)
    writer = SummaryWriter()
    criterion = nn.CrossEntropyLoss()
    # Track the best (lowest) validation loss seen so far.
    min_loss = float('inf')

    print('-------------------start training----------------------')
    print('lr:', optimizer.param_groups[0]['lr'])
    for i in range(params['epoch']):
        train_loss, train_top1, train_top5, batch_time, data_time = train(
            model, train_loader, optimizer, criterion)
        valid_loss, val_top1, val_top5 = valid(model, val_loader, optimizer,
                                               criterion)
        schedule.step(valid_loss)

        # Report to stdout and append the same metrics to the epoch log file.
        print('epoch:', str(i + 1) + '/' + str(params['epoch']))
        print('data time:%0.3f' % data_time.avg,
              'batch time:%0.3f' % batch_time.avg,
              'epoch time:%0.3f' % batch_time.sum)
        print('train loss:%0.8f' % train_loss, 'top1:%0.2f%%' % train_top1,
              'top5:%0.2f%%' % train_top5,
              'lr:', optimizer.param_groups[0]['lr'])
        print('valid loss:%0.8f' % valid_loss, 'top1:%0.2f%%' % val_top1,
              'top5:%0.2f%%' % val_top5)
        with open(params['log'] + 'bert_classifylog_' + cur_time + '.txt',
                  'a') as f:
            f.write('epoch:' + str(i + 1) + '/' + str(params['epoch']) + '\n')
            f.write('data time:%0.3f ' % data_time.avg +
                    'batch time:%0.3f ' % batch_time.avg +
                    'epoch time:%0.3f' % batch_time.sum + '\n')
            f.write('train loss:%0.8f ' % train_loss +
                    'top1:%0.2f%% ' % train_top1 +
                    'top5:%0.2f%% ' % train_top5 +
                    'lr:' + str(optimizer.param_groups[0]['lr']) + '\n')
            f.write('valid loss:%0.8f ' % valid_loss +
                    'top1:%0.2f%% ' % val_top1 +
                    'top5:%0.2f%% ' % val_top5 + '\n')
            f.write('************************************\n')
        
        writer.add_scalar('train loss', train_loss, i)
        writer.add_scalar('valid loss', valid_loss, i) 
        writer.add_scalar('train top1', train_top1, i)
        writer.add_scalar('valid top1', val_top1, i)
        writer.add_scalar('train top5', train_top5, i)
        writer.add_scalar('valid top5', val_top5, i)
        
        if valid_loss < min_loss:
            # Keep only the checkpoint with the best validation loss.
            torch.save(model.state_dict(),
                       params['save_path'] + 'bert_classifymodel_' +
                       cur_time + '.pth')
            print('saved model to --->', params['save_path'])
            min_loss = valid_loss

    writer.close()
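The unshown train() and valid() helpers evidently return meter objects exposing .avg and .sum; a minimal AverageMeter in the usual PyTorch style, assuming that is the shape of those objects:

class AverageMeter:
    """Running sum/average tracker (assumed interface of the meters above)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # Accumulate a batch statistic weighted by batch size n.
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count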
Example #6
image_size = (100, 100)
image_row_size = image_size[0] * image_size[1]

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.Resize(image_size),
    # transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

path = '/home/aims/Documents/Pytorch/pytorch_exercise/data'

net = Classification()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

train_data = CatDogDataset(path + "/" + 'train', transform=transform)
test_data = CatDogDataset(path + "/" + 'val', transform=transform)

# Train on the training split; evaluate on the held-out split.
trainloader = torch.utils.data.DataLoader(train_data,
                                          batch_size=64,
                                          shuffle=True,
                                          num_workers=4)
testloader = torch.utils.data.DataLoader(test_data,
                                         batch_size=64,
                                         shuffle=False,
                                         num_workers=4)
# Training
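The excerpt ends at the training marker; a standard minimal epoch loop over trainloader, assuming CatDogDataset yields (image, label) batches:

# A minimal sketch of the loop the comment above announces.
for epoch in range(2):
    running_loss = 0.0
    for images, labels in trainloader:
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('epoch %d loss: %.3f' % (epoch + 1, running_loss / len(trainloader)))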
Example #7
        # Per-sequence z-score normalization over the valid timesteps only.
        normed[i, 0:data_length_array[i]] = (
            data_steps_array[i, 0:data_length_array[i]] -
            data_steps_array[i, 0:data_length_array[i]].mean(axis=0)
        ) / data_steps_array[i, 0:data_length_array[i]].std(axis=0)
    # Shuffle data
    normed, data_labels_array, data_length_array = shuffle(normed,
                                                           data_labels_array,
                                                           data_length_array,
                                                           random_state=47)

    data = tf.placeholder(tf.float32, [None, normed.shape[1], normed.shape[2]])
    target = tf.placeholder(tf.float32, [None, num_classes])
    length = tf.placeholder(tf.float32, [None])
    learning_rate = tf.placeholder(tf.float32, shape=[])

    model = Classification(data, target, length, learning_rate, num_RNN,
                           num_FCN)

    # Save only one checkpoint
    saver = tf.train.Saver(max_to_keep=1)

    all_error = []
    best_error = {'epoch': [], 'best_acc': []}

    train_index = []
    test_index = []
    # KFold's random_state only takes effect when shuffle=True.
    for train_ind, test_ind in KFold(n_split, shuffle=True,
                                     random_state=47).split(normed):
        train_index.append(train_ind)
        test_index.append(test_ind)

    # The fold to run is selected by the first command-line argument.
    arg_index = int(sys.argv[1])
    sess = tf.Session()
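The excerpt stops right after the session is created; a plausible continuation that picks this job's fold and initializes variables (the model's training ops are not shown, so this stays generic):

# Hypothetical continuation: select the fold and initialize all variables.
train_ind = train_index[arg_index]
test_ind = test_index[arg_index]
sess.run(tf.global_variables_initializer())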
Example #8
import argparse
import logging

import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

from dataset import ArgsBase, NSMCDataModule, KorSTSDataModule
from model import SubtaskGPT2, SubtaskGPT2Regression, Classification

parser = argparse.ArgumentParser(description='Train KoGPT2 subtask model')

parser.add_argument('--task', type=str, default=None, help='subtask name')
parser.add_argument('--do_test',
                    action='store_true',
                    help='evaluate on test set')
parser.add_argument('--checkpoint_path', type=str, default=None)

if __name__ == '__main__':
    parser = ArgsBase.add_model_specific_args(parser)
    parser = Classification.add_model_specific_args(parser)
    parser = NSMCDataModule.add_model_specific_args(parser)
    parser = KorSTSDataModule.add_model_specific_args(parser)

    parser = Trainer.add_argparse_args(parser)
    args = parser.parse_args()

    logging.getLogger().setLevel(logging.INFO)
    logging.info(args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    if args.task.lower() == 'nsmc':
        dm = NSMCDataModule(args.train_data_path,
Example #9
parse.add_argument("--log", type=int, default=20, help="")
parse.add_argument("--no_cuda", action="store_true", default=False, help="")
args = parse.parse_args()

# Data preprocessing
normalize = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
# Check whether a GPU is available and seed the RNGs
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
model = Classification(2)
if args.cuda:
    model.cuda()
    cudnn.benchmark = True

batch_size = args.batch
# Build the training data iterator
dataset = MyData(args.datas, transforms=normalize)
valid = MyData(args.test, transforms=normalize)
dataloader = DataLoader(dataset,
                        shuffle=True,
                        batch_size=batch_size,
                        num_workers=2,
                        collate_fn=collate_fn)
valid_dataloader = DataLoader(valid,
                              shuffle=True,
Example #10
torch.manual_seed(1)
if args.cuda:
    torch.cuda.manual_seed(1)
    cudnn.benchmark = True

transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dataset = MyData(args.data, transform)
dataloader = DataLoader(dataset,
                        batch_size=args.batch,
                        shuffle=False,
                        num_workers=4,
                        collate_fn=collate_fn)
for f in os.listdir(args.models):
    model = Classification(2, False)
    # Each checkpoint stores its weights under the 'models' key.
    model.load_state_dict(
        torch.load(os.path.join(args.models, f), map_location='cpu')['models'])
    model.eval()
    if args.cuda:
        model.cuda()
    cnt = 0.0
    for image, label in dataloader:
        if args.cuda:
            image = image.cuda()
        with torch.no_grad():
            output = model(image, None)
        index = np.argmax(output.cpu().numpy(), 1)
        # np.int was removed from NumPy; use the builtin int dtype instead.
        cnt += sum(np.array(index, dtype=int) == label.numpy())
    print(f, "acc:{:.4f}".format(cnt / len(dataset)))