Code example #1
def showRPN():
    Reg = Regular.Addpoint(e.get())  # insert explicit concatenation operators
    RPN = Regular.createRPN(Reg)  # convert to reverse Polish notation (postfix)
    var.set('Reverse Polish notation: ' + ' '.join(RPN))
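The `Regular` module is project-specific and its source isn't listed. As a minimal sketch of what `Addpoint` and `createRPN` presumably do, the following converts an infix regex to postfix with the shunting-yard algorithm, using `.` as the explicit concatenation operator; the helper names, the operator set, and the precedences are assumptions, not the project's actual code.

# Minimal sketch (assumptions: '|' alternation, '.' concatenation, '*' Kleene star).
PRECEDENCE = {'|': 1, '.': 2, '*': 3}

def add_concat(regex):
    """Insert explicit '.' concatenation operators (as Regular.Addpoint presumably does)."""
    out = []
    for i, ch in enumerate(regex):
        out.append(ch)
        if i + 1 < len(regex):
            nxt = regex[i + 1]
            # concatenation occurs between literal/'*'/')' and a following literal/'('
            if ch not in '(|' and nxt not in ')|*':
                out.append('.')
    return ''.join(out)

def to_rpn(regex):
    """Shunting-yard conversion of the infix regex to reverse Polish notation."""
    output, stack = [], []
    for ch in regex:
        if ch == '(':
            stack.append(ch)
        elif ch == ')':
            while stack and stack[-1] != '(':
                output.append(stack.pop())
            stack.pop()  # discard '('
        elif ch in PRECEDENCE:
            while stack and stack[-1] != '(' and PRECEDENCE[stack[-1]] >= PRECEDENCE[ch]:
                output.append(stack.pop())
            stack.append(ch)
        else:  # ordinary input symbol
            output.append(ch)
    while stack:
        output.append(stack.pop())
    return output

print(to_rpn(add_concat('(a|b)*abb')))
# ['a', 'b', '|', '*', 'a', '.', 'b', '.', 'b', '.']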
Code example #2
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    
    # switch to train mode
    model.train()
    
    if WEIGHT_DECAY > 0:
        import Regular
        reg_loss = Regular.Regularization(model, WEIGHT_DECAY, p=REG)
        print('Regularization...')
    else:
        # a callable no-op, so `reg_loss(model)` below works in both branches
        reg_loss = lambda m: 0.
        print('No Regularization...')
        
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # move data to the GPU (torch.autograd.Variable is deprecated;
        # plain tensors carry autograd state in current PyTorch)
        input = input.cuda()
        target = target.cuda()

        # compute output
        output = model(input)
        loss = criterion(output, target) + reg_loss(model)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % PRINT_FREQ == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
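`AverageMeter` and `accuracy` are not defined in this snippet. The versions below are the conventional helpers from the official PyTorch ImageNet example, which the `val`/`avg` format fields and the `topk=(1, 5)` call above assume; treat them as a likely, not guaranteed, match for the original project.

class AverageMeter(object):
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.val = 0.0   # last value seen
        self.sum = 0.0   # running sum, weighted by sample count
        self.count = 0   # samples seen so far
        self.avg = 0.0   # running average

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the given values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res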
Code example #3
def createall():
    global index_final
    global min_table
    global character
    if e.get() != '':
        min_table, index_final, character = Regular.REG2MinDFA(e.get())
        var1.set('Finished analyzing ' + e.get())
        Label_Show2.config(fg='green')
        btn_Show1.config(state='active')
        btn_Show2.config(state='active')
        btn_Show3.config(state='active')
        btn_Show4.config(state='active')
        btn_Show6.config(state='active')
        btn_Show7.config(state='active')
    else:
        var1.set('Input is empty, please enter it again')
        Label_Show2.config(fg='red')
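`Regular.REG2MinDFA` evidently returns the minimized DFA's transition table (`min_table`), its accepting states (`index_final`), and the input alphabet (`character`), but the exact data layout isn't shown. Purely as an illustration, assuming the table maps a state index and a symbol index to a next state, running the DFA over a string would look like:

def run_dfa(min_table, index_final, character, s, start=0):
    """Hypothetical DFA simulation; the actual layout in Regular may differ."""
    state = start
    for ch in s:
        if ch not in character:
            return False            # symbol outside the alphabet
        state = min_table[state][character.index(ch)]
        if state is None:
            return False            # no transition defined
    return state in index_final     # accept iff we end in a final state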
Code example #4
def Fit(iteration, train_set, val_set, model, loss, optimizer, batch_size,
        epochs):

    val_loader = DataLoader(val_set,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=WORKERS)

    loss_history, acc_history = [], []
    best_acc = 0.0
    patience = 0

    if WEIGHT_DECAY > 0:
        import Regular
        reg_loss = Regular.Regularization(model, WEIGHT_DECAY, p=REG)
        print('Regularization...')
    else:
        # a callable no-op, so `reg_loss(model)` below works in both branches
        reg_loss = lambda m: 0.
        print('No Regularization...')

    print('Image Augment...')
    from keras.preprocessing.image import ImageDataGenerator
    datagen = ImageDataGenerator(rotation_range=30,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 zoom_range=[0.8, 1.2],
                                 shear_range=0.2,
                                 horizontal_flip=True)

    for epoch in range(epochs):
        if patience == PATIENCE:
            print("Early Stopping...")
            break

        epoch_start_time = time.time()

        adjust_learning_rate(optimizer, epoch)

        train_acc, train_loss = 0.0, 0.0
        val_acc, val_loss = 0.0, 0.0

        model.train()  # Switch to train mode
        # ImageDataGenerator expects channels-last arrays, hence the (N, 48, 48, 1) view
        for i, batch in enumerate(datagen.flow(
                train_set[:][0].view(len(train_set[:][0]), 48, 48, 1),
                train_set[:][1], batch_size=batch_size)):

            # datagen.flow loops forever, so stop after AUG_SIZE passes over the data
            if i == AUG_SIZE * (len(train_set) // batch_size + 1):
                break

            # type transform
            batch = (torch.FloatTensor(batch[0]), torch.LongTensor(batch[1]))
            batch = (batch[0].view(len(batch[0]), 1, 48, 48), batch[1])

            # compute output
            train_pred = model(batch[0].cuda())
            batch_loss = loss(train_pred, batch[1].cuda()) + reg_loss(model)

            # compute gradient and do step
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()

            train_pred = train_pred.float()
            batch_loss = batch_loss.float()

            train_acc += np.sum(
                np.argmax(train_pred.cpu().data.numpy(), axis=1) ==
                batch[1].numpy())
            train_loss += batch_loss.item()

        model.eval()  # Switch to evaluate mode
        for i, data in enumerate(val_loader):
            # compute output
            with torch.no_grad():
                val_pred = model(data[0].cuda())
                batch_loss = loss(val_pred, data[1].cuda())

            val_pred = val_pred.float()
            batch_loss = batch_loss.float()

            val_acc += np.sum(
                np.argmax(val_pred.cpu().data.numpy(), axis=1) ==
                data[1].numpy())
            val_loss += batch_loss.item()

        train_acc = train_acc / (len(train_set) * AUG_SIZE)
        val_acc = val_acc / len(val_set)

        print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.3f Loss: %3.3f | Val Acc: %3.3f loss: %3.3f' % \
                (epoch + 1, epochs, time.time()-epoch_start_time, \
                 train_acc, train_loss, val_acc, val_loss))

        loss_history.append((train_loss, val_loss))
        acc_history.append((train_acc, val_acc))

        # Early Stopping
        if (val_acc > best_acc):
            torch.save(model.state_dict(),
                       MODEL_PATH + '/' + str(iteration + 1) + '_model.pth')
            best_acc = val_acc
            patience = 0
            print('Model Saved!')
        else:
            patience += 1

    return np.asarray(loss_history), np.asarray(acc_history)
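In the training loops above, `Regular.Regularization(model, WEIGHT_DECAY, p=REG)` is used as a callable that returns a scalar penalty when applied to the model. Its source isn't shown; a minimal sketch consistent with that usage is an `nn.Module` that sums the p-norms of the weight tensors, with every detail below an assumption rather than the project's actual implementation.

import torch
import torch.nn as nn

class Regularization(nn.Module):
    """Sketch of a p-norm weight penalty; Regular's real class may differ."""

    def __init__(self, model, weight_decay, p=2):
        super(Regularization, self).__init__()
        self.weight_decay = weight_decay
        self.p = p

    def forward(self, model):
        penalty = 0.0
        for name, param in model.named_parameters():
            if 'weight' in name:  # penalize weights only, not biases
                penalty = penalty + torch.norm(param, p=self.p)
        return self.weight_decay * penalty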
Code example #5
def Fit(train_set, val_set, model, loss, optimizer, batch_size, epochs):

    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=8)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=8)
    
    loss_history, acc_history = [], []
    best_acc = 0.0
    
    if WEIGHT_DECAY > 0:
        import Regular
        reg_loss = Regular.Regularization(model, WEIGHT_DECAY, p=REG)
    else:
        # a callable no-op, so `reg_loss(model)` below works in both branches
        reg_loss = lambda m: 0.
        print('No Regularization...')
    
    for epoch in range(epochs):
        epoch_start_time = time.time()
        
        adjust_learning_rate(optimizer, epoch)
        
        train_acc, train_loss = 0.0, 0.0
        val_acc, val_loss = 0.0, 0.0
    
        model.train() # Switch to train mode
        for i, data in enumerate(train_loader):
            # data augment: DataAug yields AUG_SIZE extra samples per original,
            # so the concatenated batch grows to (AUG_SIZE + 1) * batch_size
            X_aug, y_aug = DataAug(data[0], data[1], AUG_SIZE, batch_size)
            data[0] = torch.cat((data[0], X_aug))
            data[1] = torch.cat((data[1], y_aug))
            # caution: the enlarged batch may not fit in CUDA memory

            # compute output
            train_pred = model(data[0].cuda())
            batch_loss = loss(train_pred, data[1].cuda()) + reg_loss(model)
            
            # compute gradient and do step
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            
            train_pred = train_pred.float()
            batch_loss = batch_loss.float()
            
            train_acc += np.sum(np.argmax(train_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
            train_loss += batch_loss.item()
     
        model.eval() # Switch to evaluate mode
        for i, data in enumerate(val_loader):
            # compute output
            with torch.no_grad():
                val_pred = model(data[0].cuda())
                batch_loss = loss(val_pred, data[1].cuda())
    
            val_pred = val_pred.float()
            batch_loss = batch_loss.float()
    
            val_acc += np.sum(np.argmax(val_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
            val_loss += batch_loss.item()
            
        train_acc = train_acc / (len(train_set) * (AUG_SIZE + 1))
        val_acc = val_acc / len(val_set)
        
        print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.3f Loss: %3.3f | Val Acc: %3.3f loss: %3.3f' % \
                (epoch + 1, epochs, time.time()-epoch_start_time, \
                 train_acc, train_loss, val_acc, val_loss))
        
        loss_history.append((train_loss,val_loss))
        acc_history.append((train_acc,val_acc))
                
        if (val_acc > best_acc):
            torch.save(model.state_dict(), MODEL_PATH + '/model.pth')
            best_acc = val_acc
            print('Model Saved!')
    
    return np.asarray(loss_history), np.asarray(acc_history)
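`adjust_learning_rate`, called at the top of each epoch in both `Fit` variants, is also not shown. A common scheme, and a plausible stand-in, is step decay; the `base_lr` and `decay_every` defaults here are invented for illustration.

def adjust_learning_rate(optimizer, epoch, base_lr=0.01, decay_every=30):
    """Hypothetical step decay: divide the learning rate by 10
    every `decay_every` epochs."""
    lr = base_lr * (0.1 ** (epoch // decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr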
Code example #6
import Regular
import stringgenerator

while True:
    print('1. Generate new file')
    print('2. Check file')
    print('3. Enter string from keyboard')
    print('0. Quit')
    c = input('Make your choice: ')
    if c.isdigit():
        choice = int(c)
        if choice == 1:
            stringgenerator.generator()
        elif choice == 2:
            Regular.CheckFromFile()
        elif choice == 3:
            s = input('Enter the string: ')  # avoid shadowing the built-in `str`
            print('RegEx result:', Regular.check(s).rstrip('\n'))
        elif choice == 0:
            break
        else:
            print('Wrong choice, try again!')
    else:
        print('Wrong choice, try again!')
print('Bye Bye')
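`Regular.check` and `stringgenerator.generator` are project modules whose source isn't listed. As a hypothetical stand-in for `check`, consistent with the `.rstrip('\n')` call above (the result apparently carries a trailing newline), one could match against a placeholder pattern with Python's built-in `re`:

import re

# Placeholder pattern; the real one lives inside the Regular module.
PATTERN = re.compile(r'(a|b)*abb')

def check(s):
    """Hypothetical stand-in for Regular.check: classify the whole string."""
    return 'accepted\n' if PATTERN.fullmatch(s) else 'rejected\n'

print('RegEx result:', check('aababb').rstrip('\n'))  # RegEx result: accepted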