Exemplo n.º 1
0
def main():
    """Entry point: set up the run directory and seeds, then launch training
    either in the current process or with one spawned process per GPU."""
    args = parser.parse_args()
    args.timestamp = tools.get_timestamp()

    tools.mkdir_or_exist(args.workdir)
    tools.setup(args.benchmark, args.deterministic, args.seed)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # With the env:// init method the world size comes from the environment.
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    gpu_count = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single-process path: run the worker directly.
        main_worker(args.gpu, gpu_count, args)
    else:
        # One process per local GPU, so the total world size scales with the
        # per-node GPU count before spawning the workers.
        args.world_size = gpu_count * args.world_size
        mp.spawn(main_worker, nprocs=gpu_count, args=(gpu_count, args))
Exemplo n.º 2
0
def main():
    """Parse arguments, build model/data/trainer, and run the training loop,
    checkpointing whenever the validation loss improves."""
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    utils.saveargs(args)

    # Checkpoint handler.
    checkpoints = Checkpoints(args)

    # Model and loss criterion.
    models = Model(args)
    model, criterion = models.setup(checkpoints)

    # Data loaders.
    loaders = Dataloader(args).create()

    # Training-loop driver and validation-set evaluator.
    trainer = Trainer(args, model, criterion)
    tester = Tester(args, model, criterion)

    # Train, tracking the best (lowest) validation loss seen so far.
    loss_best = 1e10
    for epoch in range(args.nepochs):
        # One epoch of training followed by one evaluation pass.
        trainer.train(epoch, loaders)
        loss_test = tester.test(epoch, loaders)

        # Checkpoint only on improvement.
        if loss_test < loss_best:
            loss_best = loss_test
            checkpoints.save(epoch, model, True)
Exemplo n.º 3
0
def main():
    """
    Three run modes:
    1. single-CPU mode;
    2. single-GPU mode;
    3. distributed mode: multi-node multi-GPU, or single-node multi-GPU.
    Distributed advantages: 1. supports synchronized BN; 2. under DDP every
    trainer has its own process, so training is faster and GPU memory use is
    balanced.
    """
    args = parser.parse_args()
    # Pick the run mode from the available hardware and the hyper-parameters.
    num_gpus_available = torch.cuda.device_count()
    if num_gpus_available == 0:
        args.gpus = 0
    elif args.gpus > num_gpus_available:
        raise ValueError(
            f'--gpus(-g {args.gpus}) can not greater than available device({num_gpus_available})'
        )

    # World size = total worker processes across all nodes.
    args.world_size = args.gpus * args.nodes
    if not args.cuda or args.world_size == 0:
        # 1. CPU-only mode.
        args.cuda = False
        args.gpus = 0
        args.distributed = False
    elif args.world_size > 1:
        # 3. distributed mode.
        args.distributed = True
    elif args.world_size == 1:
        # 2. single-GPU mode.
        args.distributed = False
    else:
        raise ValueError(
            f'Check config parameters --nodes/-n={args.nodes} and --gpus/-g={args.gpus}!'
        )

    if args.distributed and args.gpus > 1:
        # Launch one distributed worker process per local GPU.
        mp.spawn(main_worker, nprocs=args.gpus, args=(args, ))
    else:
        # Single process: worker rank 0 does everything.
        main_worker(0, args)
Exemplo n.º 4
0
def main():
    """Record a failed genome and emit a zero-accuracy result for it.

    Loads the genome pickled by the parent process from
    ``input_file/input<genome_id-1>.pkl``, archives a timestamped copy under
    ``error_evaluation/`` for post-mortem inspection, then writes an
    artificial accuracy of 0 to ``output_file/output<genome_id-1>.pkl`` so
    the evolutionary loop can proceed past the failure.
    """
    args = parser.parse_args()

    # 1, store the genome information
    with open("input_file/input%d.pkl" % int(args.genome_id - 1), "rb") as f:
        genome = pickle.load(f)

    # exist_ok replaces the racy exists()/makedirs() check-then-act pattern.
    os.makedirs("error_evaluation", exist_ok=True)
    filename = os.path.join(
        "error_evaluation",
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".pkl")
    with open(filename, "wb") as f:
        pickle.dump(genome, f)

    # 2, artificially assign a bad classification accuracy to it.
    accuracy = 0

    with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        pickle.dump(accuracy, f)
Exemplo n.º 5
0
import os
import time
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
import numpy as np
from config import parser
args = parser.parse_args()
import pickle
from network import Two_Stream_RNN
from dataloader import Face_Dataset, UtteranceRecord
from sklearn.metrics import mean_squared_error
from torch.autograd import Variable as Variable
import copy
from tqdm import tqdm
import glob
from Same_Length_Sampler import SameLengthBatchSampler
import pandas as pd


class My_loss(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x, y):
        vx = x - torch.mean(x)
        vy = y - torch.mean(y)
                           config.unary_closures)
            torch.save(test, test_file)

        if dev is None:
            logging.info('Dev dataset not found, generating...')
            dev = Dataset(dev_dir, 'dev', grammar, vocab, terminal_vocab,
                          config.syntax, config.max_example_action_num,
                          config.unary_closures)
            torch.save(dev, dev_file)

        if train is None:
            logging.info('Train dataset not found, generating...')
            train = Dataset(train_dir, 'train', grammar, vocab, terminal_vocab,
                            config.syntax, config.max_example_action_num,
                            config.unary_closures)
            torch.save(train, train_file)

    train.prepare_torch(config.cuda)
    dev.prepare_torch(config.cuda)
    test.prepare_torch(config.cuda)
    return train, dev, test


if __name__ == '__main__':
    config = parser.parse_args()
    config.syntax = 'dependency'
    # Regenerate the datasets twice: once without and once with unary closures.
    for use_closures in (False, True):
        config.unary_closures = use_closures
        load_dataset(config, force_regenerate=True)
Exemplo n.º 7
0
    def __init__(self):
        """Parse CLI options, build data loaders, model, losses and optimizer,
        then immediately run the full training loop (testing every 20th epoch).

        NOTE(review): all work, including training, happens inside __init__;
        there is no separate run() entry point.
        """
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-----------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        # Seed whichever RNG matches the selected device.
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
        else:
            torch.manual_seed(self.args.seed)

        self.device = torch.device("cuda" if use_cuda else "cpu")

        kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
        '''
        Build the DataLoaders
        '''
        # TODO: the dataset needs to be re-prepared
        print("Create Dataloader")
        self.train_loader = DataLoader(Dataset2(),
                                       batch_size=1,
                                       shuffle=True,
                                       **kwargs)
        self.test_loader = DataLoader(Dataset2(),
                                      batch_size=1,
                                      shuffle=True,
                                      **kwargs)
        '''
        Define the model
        '''
        print("Create Model")
        self.model = OPN().to(self.device)
        #        self.model = nn.DataParallel(OPN())
        if use_cuda:
            # self.model = self.model.cuda()
            cudnn.benchmark = True
        '''
        Load pretrained model weight parameters as needed
        '''

        # Pretrained VGG16 feature extractor (used for detection); its
        # parameters are frozen in the loop below.
        self.vgg = models.vgg16(pretrained=True).to(self.device).features

        for i in self.vgg.parameters():
            i.requires_grad = False
        # NOTE(review): the bare except below swallows every load error
        # (missing file, shape mismatch, …) — consider narrowing it.
        try:
            if self.args.resume and self.args.pretrained_weight:
                self.model.load_state_dict(torch.load(os.path.join('OPN.pth')),
                                           strict=False)
                print("模型加载成功")  # "model loaded successfully"
        except:
            print("模型加载失败")  # "model load failed"
        '''
        CUDA acceleration
        '''
        if use_cuda:
            #   self.model = nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
        '''
        Build the loss objective functions,
        choose the optimizer,
        and the learning-rate schedule
        '''
        print("Establish the loss, optimizer and learning_rate function")
        self.loss_tv = TVLoss()
        self.loss_l1 = L1_Loss()
        # style-loss and content-loss also exist as options (unused here)
        # self.optimizer = optim.SGD(
        #     params=self.model.parameters(),
        #     lr=self.args.lr,
        #     weight_decay=self.args.weight_decay,
        #     momentum=0.5
        # )
        self.optimizer = optim.Adam(
            params=self.model.parameters(),
            lr=0.001,
            betas=(0.9, 0.999),
            eps=1e-8,  # guards against a zero denominator
            weight_decay=0)
        # self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=5, eta_min=1e-5)
        '''
        Start model training
        '''
        print("Start training")
        for epoch in tqdm(range(1, self.args.epoch + 1)):
            self.train(epoch)
            if epoch % 20 == 0:
                self.test(epoch)

        torch.cuda.empty_cache()

        print("finish model training")
Exemplo n.º 8
0
def polygonize(dir_base_name, base_name):
    """Vectorize the thresholded raster into a polygon shapefile, then split
    it into temporary shapefiles of at most ``args.chunksize`` features each
    (named ``<dir_base_name>-tmp-<k>.shp``).

    Returns the number of temporary chunk shapefiles produced.
    """
    args = parser.parse_args()

    chunksize = args.chunksize
    currentchunk = 0
    totalsubsets = 0

    outputgdal = dir_base_name + "-gdal-tmp.tif"
    # QGIS POLYGONIZE

    print ""
    print "Polygonizing (coarse):"
    print "----------------------"
    shapefile = dir_base_name + '.shp'
    # Skip polygonization if a previous run already produced the shapefile.
    if (not os.path.isfile(shapefile)):
        command = 'gdal_polygonize.py ' + outputgdal + ' -f "ESRI Shapefile" ' + shapefile + ' ' + base_name
        logging.debug(command)
        # print command
        os.system(command)

    # Split resulting megapolygon file into smaller chunks
    # most code from: http://cosmicproject.org/OGR/cris_example_write.html

    print ""
    print "Splitting megapolygon file into chunks"
    print "--------------------------------------"

    #####

    # 2 get the shapefile driver
    driver = ogr.GetDriverByName('ESRI Shapefile')

    # 3 open the input data source and get the layer
    inDS = driver.Open(shapefile, 0) #shows cover at given points
    if inDS is None:
        print 'Could not open shapefile'
        sys.exit(1)
    inLayer = inDS.GetLayer()

    # 5 get the FieldDefn's for the id and cover fields in the input shapefile
    feature = inLayer.GetFeature(0)
    idFieldDefn = feature.GetFieldDefnRef('DN')

    # 7 loop through the input features
    inFeature = inLayer.GetNextFeature()
    while inFeature:
        # Start a fresh chunk shapefile every `chunksize` features.
        if currentchunk == 0 or currentchunk >= chunksize:
            currentchunk = 0
            totalsubsets = totalsubsets + 1
            # this is a new temp file
            # 4 create a new data source and layer
            fn = dir_base_name + '-tmp-' + str(totalsubsets) + '.shp'
            if os.path.exists(fn):driver.DeleteDataSource(fn)
            outDS = driver.CreateDataSource(fn)
            if outDS is None:
                print 'Could not create temp shapefile'
                sys.exit(1)
            outLayer = outDS.CreateLayer(base_name, geom_type=ogr.wkbPolygon)

            #create new field in the output shapefile
            outLayer.CreateField(idFieldDefn)

            # 6 get the FeatureDefn for the output layer
            featureDefn = outLayer.GetLayerDefn()

        # create a new feature
        outFeature = ogr.Feature(featureDefn)#using featureDefn created in step 6

        # set the geometry
        geom = inFeature.GetGeometryRef()
        outFeature.SetGeometry(geom) #move it to the new feature

        # set the attributes
        DN = inFeature.GetField('DN')
        outFeature.SetField('DN', DN) #move it to the new feature

        # add the feature to the output layer
        outLayer.CreateFeature(outFeature)

        # destroy the output feature
        outFeature.Destroy()

        # destroy the input feature and get a new one
        inFeature.Destroy()
        inFeature = inLayer.GetNextFeature()

        currentchunk = currentchunk + 1

    # close the data sources
    inDS.Destroy()
    # NOTE(review): outDS is unbound here if the input layer had no features.
    outDS.Destroy() #flush out the last changes here

    print ""
    print "Produced " + str(totalsubsets) + " temporary shapefiles"
    print ""

    return totalsubsets
Exemplo n.º 9
0
def process_file(inputfile, basedir = ""):

    """Run the full raster-to-GeoJSON vectorization pipeline for one file:
       thresholdize (GIMP), polygonize (GDAL), simplify, consolidate, convert
       the traced shapefile to GeoJSON, then delete the temporary files.

       NOTE: This still needs a lot of work for when dealing
       with subfolders and such.
       Best case is image file is located in same dir as vectorizer_map.py
    """

    args = parser.parse_args()

    gimp_path = args.gimp_path

    print "\n\nProcessing file: " + inputfile
    # right now assuming vectorizer, simplifier and input are in the same folder
    fullpath = os.path.abspath(__file__)

    # Derive the bare map name: strip the ".tif" suffix and any leading path.
    base_name = inputfile[:inputfile.find(".tif")]
    base_name = base_name[base_name.rfind("/")+1:]

    # create a folder to store all this
    if basedir != '':
        directory = basedir + '/' + base_name
        inputfile = basedir + '/' + inputfile
    else:
        directory = base_name

    if not os.path.exists(directory):
        os.makedirs(directory)

    path = os.path.abspath(directory)#fullpath[:fullpath.find("/vectorize_map.py")] + '/' + directory

    # GIMP processing
    # Common prefix for every intermediate/output file of this run.
    dir_base_name = os.path.join(directory, base_name)

    # create a log file
    # logfile = open(directory + "/py-log.txt", "w")
    logging.basicConfig(filename=os.path.join(directory, "py-log.txt"),
                        format='%(asctime)s %(message)s',level=logging.DEBUG)


    logging.debug("Log file for " + inputfile + " with colors:\n\n")
    logging.debug(str(args.vectorize_config['basecolors']) + "\n\n")

    # Pipeline stages; each reads/writes files under dir_base_name.
    thresholdize(inputfile, dir_base_name)

    totalsubsets = polygonize(dir_base_name, base_name)

    simplify(path, base_name, totalsubsets)

    consolidate(inputfile, path, dir_base_name, base_name)

    print ""
    print "Creating GeoJSON output..."
    print "--------------------------"
    jsonfile = dir_base_name + '-traced.json'
    shapefile = dir_base_name + '-traced.shp'
    command = 'ogr2ogr -t_srs EPSG:4326 -s_srs EPSG:3857 -f "GeoJSON" ' + jsonfile + ' ' + shapefile
    logging.debug(command)
    # print command
    os.system(command)

    # Cleaning
    print ""
    print "Cleaning..."
    print "-----------"
    os.system("rm " + dir_base_name + "-gdal-tmp.tif")
    os.system("rm " + dir_base_name + "-wsg-tmp.tif")
    os.system("rm " + dir_base_name + "-threshold-tmp.tif")
    os.system("rm " + dir_base_name + "-tmp-*.shp")
    os.system("rm " + dir_base_name + "-tmp-*.dbf")
    os.system("rm " + dir_base_name + "-tmp-*.shx")
    os.system("rm " + dir_base_name + "-tmp-*.prj")
    os.system("rm " + dir_base_name + "-tmp*.tif")
    os.system("rm " + dir_base_name + ".*")

    # close log file
    logging.shutdown()
Exemplo n.º 10
0
def run_experiment(cmdline_args=None):
    """Demonstrate lottery-ticket style pruning with prunhild.

    Phase 1 trains a LeNet while pruning online; phase 2 rebuilds the
    network from the retrain seed, applies the final prune mask exported
    from phase 1, and retrains the resulting sparse model from scratch.

    Returns a ``(logs_prune, logs_retrain)`` tuple of per-epoch train logs.
    """
    args = parser.parse_args(cmdline_args)
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}

    data_loaders = setup_dataloaders(args, kwargs)

    def train_epochs(net, opt, prn, loader_seed, banner, prune_flag):
        # Shared driver for both phases: announce, seed the dataloader RNG,
        # run args.epochs epochs, and collect the per-epoch logs.
        print(banner)
        torch.manual_seed(loader_seed)
        logs = []
        for epoch in range(1, args.epochs + 1):
            logs += train(args, net, device, data_loaders, opt, prn,
                          epoch, prune=prune_flag)
        print("\n\n\n")
        return logs

    model = LotteryLeNet().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum)

    # Pruning setup: local-ratio cutoff, skipping the final bias weights,
    # with the mask updated online during training.
    cutoff = prunhild.cutoff.LocalRatioCutoff(args.cutoff_ratio)
    params = list(model.parameters())[:-1]
    pruner = prunhild.pruner.CutoffPruner(params, cutoff, prune_online=True)

    logs_prune = train_epochs(model, optimizer, pruner,
                              args.seed_dataloader, "Pruning Start", True)

    # Export the pruner state (the final prune masks) — this sparse pattern
    # is the "lottery ticket" we retrain from scratch below. Make sure the
    # architecture and the parameters match when reloading it!
    pruner_state = pruner.state_dict()

    # Reset the seed so the fresh network is initialized with the same
    # weights, then rebuild model/optimizer/pruner for the retrain phase.
    torch.manual_seed(args.seed_retrain)
    model_retrain = LotteryLeNet().to(device)
    optimizer_retrain = optim.SGD(model_retrain.parameters(),
                                  lr=args.learning_rate,
                                  momentum=args.momentum)
    cutoff_retrain = prunhild.cutoff.LocalRatioCutoff(args.cutoff_ratio)
    params_retrain = list(model_retrain.parameters())[:-1]
    pruner_retrain = prunhild.pruner.CutoffPruner(params_retrain,
                                                  cutoff_retrain)

    # Load the masks last used for pruning; calling prune with
    # update_state=False simply applies that stored mask.
    pruner_retrain.load_state_dict(pruner_state)
    pruner_retrain.prune(update_state=False)

    logs_retrain = train_epochs(model_retrain, optimizer_retrain,
                                pruner_retrain,
                                args.seed_dataloader_retrain,
                                "Retraining Start", False)

    return logs_prune, logs_retrain
Exemplo n.º 11
0
            real_a = tensor2image_RGB(real_A[j, ...])
            real_b = tensor2image_RGB(real_B[j, ...])

            plt.subplot(221), plt.title("real_A"), plt.imshow(real_a)
            plt.subplot(222), plt.title("fake_B"), plt.imshow(fake_b)
            plt.subplot(223), plt.title("real_B"), plt.imshow(real_b)
            plt.subplot(224), plt.title("fake_A"), plt.imshow(fake_a)

            plt.savefig(os.path.join(des_pth, '%06d-%02d.jpg'%(i, j)))
        #break #-> debug

    print("≧◔◡◔≦ Congratulation! Successfully finishing the testing!")


if __name__ == '__main__':
    opt = parser.parse_args()
    opt.mode = 'test'

    # Pretty-print every option before running the test pass.
    summary = PrettyTable(field_names=['config-name', 'config-value'])
    for column in ('config-name', 'config-value'):
        summary.align[column] = 'm'
    for name, value in sorted(vars(opt).items()):
        summary.add_row([name, value])
    print(summary.get_string(reversesort=True))

    #### testing
    test(opt)
Exemplo n.º 12
0
Arquivo: app.py Projeto: tlevine/at
import logging
import sqlite3
from datetime import datetime
from functools import wraps, partial
from multiprocessing import Manager, Process
from time import time

from werkzeug.contrib.fixers import ProxyFix
from flask import Flask, render_template, abort, g, \
    redirect, session, request, flash, url_for

import util
import updater
import queries
from config import parser
# Module-level app wiring: parse config once at import time.
config = parser.parse_args()

active_devices = Manager().dict() # messy

# Data source selection: fake data when --fake is set (local development),
# otherwise the real updater.
if config.fake:
    from util import fake_now_at as now_at
else:
    now_at = updater.now_at

# Logging
sink = logging.StreamHandler() # stderr
if config.debug:
    sink.setLevel(logging.DEBUG)
else:
    sink.setLevel(logging.ERROR)
Exemplo n.º 13
0
def main():
    """Train a model for ``args.nepochs`` epochs and write its fitness.

    Fitness is ``[mean test accuracy over the last 5 epochs, median
    per-epoch inference time]``, pickled to
    ``output_file/output<genome_id-1>.pkl`` for the evolutionary driver.
    """
    # parse the arguments
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)

    # Create model and loss criterion.
    models = Model(args)
    model, criterion = models.setup()

    # Data loading.
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # The trainer handles the training loop; the tester evaluates on the
    # validation set.
    trainer = Trainer(args, model, criterion)
    tester = Tester(args, model, criterion)

    # start training !!!
    acc_test_list = []
    inference_time_list = []
    acc_best = 0
    for epoch in range(args.nepochs):

        # train for a single epoch, then time the evaluation pass
        start_time_epoch = time.time()
        loss_train, acc_train = trainer.train(epoch, loaders)
        inference_time_start = time.time()
        loss_test, acc_test = tester.test(epoch, loaders)
        inference_time_list.append(
            np.round((time.time() - inference_time_start), 2))
        acc_test_list.append(acc_test)

        time_elapsed = np.round((time.time() - start_time_epoch), 2)

        # update the best test accuracy found so far
        if acc_test > acc_best:
            acc_best = acc_test

        print(
            "Epoch {}, train loss = {}, test accu = {}, best accu = {}, {} sec"
            .format(epoch, np.average(loss_train), acc_test, acc_best,
                    time_elapsed))

    # Averaging the last 5 epochs gives a more stable accuracy estimate
    # than any single epoch; the median damps inference-time outliers.
    accuracy = np.mean(acc_test_list[-5:])
    inference_time = np.median(inference_time_list)

    fitness = [accuracy, inference_time]
    with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        pickle.dump(fitness, f)
Exemplo n.º 14
0
    def __init__(self, pathModel, save_plots, args):
        """Load a trained chest-X-ray classifier checkpoint and prepare the
        CAM / Grad-CAM / Grad-CAM++ machinery plus the 'box' dataloader.

        pathModel  -- path to the torch checkpoint; expects the key
                      'model_state_dict'.
        save_plots -- whether heatmap figures go to <cwd>/heatmap_output.
        args       -- parsed CLI options (stored on self; see NOTE below).
        """
        # names of diseases
        self.class_names = ['Atelectasis', 'Cardiomegaly',
                            'Effusion', 'Infiltrate',
                            'Mass', 'Nodule',
                            'Pneumonia', 'Pneumothorax']

        self.save_plots = save_plots
        self.save_location = os.path.join(os.getcwd(), "heatmap_output")
        self.args = args
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        num_GPU = torch.cuda.device_count()
        # NOTE(review): this re-parses sys.argv and shadows the `args`
        # parameter passed in above (already stored as self.args) — confirm
        # the two are meant to be identical.
        args = parser.parse_args()
        # ---- Initialize the network
        if args.global_pool == 'PCAM':
            model = PCAM_Model(args)
        else:
            model = select_model(args)

        # load the model to multiple GPUs if needed
        model = model.to(self.device)
        torch.cuda.memory_allocated()
        torch.cuda.max_memory_allocated()
        modelCheckpoint = torch.load(pathModel)
        # Multi-GPU checkpoints store weights under model.module.
        if num_GPU > 1:
            model.module.load_state_dict(modelCheckpoint['model_state_dict'])
        else:
            model.load_state_dict(modelCheckpoint['model_state_dict'])

        self.model = model
        self.model.eval()

        # Backbone-specific target layer for the CAM visualizations.
        # NOTE(review): model_dict is unbound if args.backbone is neither
        # "densenet121" nor "ResNet18".
        if args.backbone == "densenet121":
            model_dict = dict(type='densenet',
                              layer_name='img_model_features_norm5',
                              arch=self.model,
                              input_size=(args.img_size, args.img_size)
                              )
        elif args.backbone == "ResNet18":
            model_dict = dict(type='resnet',
                              layer_name='img_model_layer4_bottleneck1_bn2',
                              arch=self.model,
                              input_size=(args.img_size, args.img_size)
                              )

        # Function that generate the heatmap with GradCAM
        self.GradCAM = Grad_CAM(model_dict)
        # Function that generate the heatmap with GradCAM++
        self.GradCAMCPP = Grad_CAMpp(model_dict)

        # The weight of CNN are extraced for Class activation map method
        if args.global_pool == "PCAM":
            pass
        else:
            if args.backbone == "densenet121":
                self.weights = list(self.model.FF.parameters())[-2].squeeze()
            elif args.backbone == "ResNet18":
                self.weights = list(model.img_model.fc[1].parameters())[-2].squeeze()

        # Function that preprocess the input images, same preprocessing used for training CNN.
        trans = transforms.Compose([
            transforms.Resize(args.img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        current_dict = os.getcwd()
        data_root_dir = os.path.join(current_dict, 'dataset')
        self.datasets = CXRDataset(data_root_dir, dataset_type='box', Num_classes=args.num_classes,
                                   img_size=args.img_size, transform=trans)
        self.dataloaders = DataLoader(self.datasets, batch_size=1, shuffle=True, num_workers=args.num_workers)
        # Per-method IoU accumulators, filled during evaluation.
        self.iou_CAM = []
        self.iou_GradCAM = []
        self.iou_GradCAMCPP = []
Exemplo n.º 15
0
def consolidate(inputfile, path, dir_base_name, base_name):
    """Merge every per-chunk '-polygon' shapefile under ``path`` into one
    traced shapefile (``<dir_base_name>-traced.shp``).

    For each chunk polygon the average color of the underlying raster is
    matched against the configured base colors; paper-colored polygons are
    dropped.  Dot/cross feature data and the WGS84 centroid are written as
    attributes on each output feature.
    """
    # Now combine all subsets into a macroset

    # 4 create a new data source and layer
    fn = dir_base_name + '-traced.shp'

    # 2 get the shapefile driver
    driver = ogr.GetDriverByName('ESRI Shapefile')

    # 3 open the input data source and get the layer
    shapefile = dir_base_name + '.shp'
    inDS = driver.Open(shapefile, 0) #shows cover at given points
    if inDS is None:
        print 'Could not open shapefile'
        sys.exit(1)
    inLayer = inDS.GetLayer()

    # 5 get the FieldDefn's for the id and cover fields in the input shapefile
    feature = inLayer.GetFeature(0)
    idFieldDefn = feature.GetFieldDefnRef('DN')

    if os.path.exists(fn):driver.DeleteDataSource(fn)
    outDS = driver.CreateDataSource(fn)
    if outDS is None:
        print 'Could not create final shapefile'
        sys.exit(1)
    outLayer = outDS.CreateLayer(base_name, geom_type=ogr.wkbPolygon)

    #create new field in the output shapefile
    outLayer.CreateField(idFieldDefn)

    # 6 get the FeatureDefn for the output layer
    featureDefn = outLayer.GetLayerDefn()

    # new field definitions for this shapefile
    # color definition
    colorDefn = ogr.FieldDefn("Color", ogr.OFTInteger)
    colorDefn.SetWidth(2)
    colorDefn.SetPrecision(0)
    outLayer.CreateField( colorDefn )

    # dot count definition
    dotCountDefn = ogr.FieldDefn("DotCount", ogr.OFTInteger)
    dotCountDefn.SetWidth(2)
    dotCountDefn.SetPrecision(0)
    outLayer.CreateField( dotCountDefn )

    # dot type definition
    dotTypeDefn = ogr.FieldDefn("DotType", ogr.OFTInteger)
    dotTypeDefn.SetWidth(1)
    dotTypeDefn.SetPrecision(0)
    outLayer.CreateField( dotTypeDefn )

    # cross count definition
    crossCountDefn = ogr.FieldDefn("CrossCount", ogr.OFTInteger)
    crossCountDefn.SetWidth(2)
    crossCountDefn.SetPrecision(0)
    outLayer.CreateField( crossCountDefn )

    # cross data definition
    crossDataDefn = ogr.FieldDefn("CrossData", ogr.OFTString)
    crossDataDefn.SetWidth(255)
    outLayer.CreateField( crossDataDefn )

    # add lat/lon as OFTReal attributes
    outLayer.CreateField(ogr.FieldDefn("CentroidY", ogr.OFTReal))
    outLayer.CreateField(ogr.FieldDefn("CentroidX", ogr.OFTReal))

    # Pass 1: classify each chunk polygon by the average color of the raster
    # pixels it covers; collect the survivors for the merge pass below.
    polygonfiles = []
    for files in os.listdir(path):
        if files.endswith(".shp") and files.find('-polygon') != -1:
            polygonfile = path + "/" + files
            # apply a projection so gdalwarp doesnt complain
            polygonfilename = files[:files.find(".shp")]
            os.system("cp " + dir_base_name + ".prj " + path + "/" + polygonfilename + ".prj")
            extractedfile = path + "/" + polygonfilename + "-extracted.tif"
            # extract bitmap from original
            command = "gdalwarp -q -t_srs EPSG:3785 -cutline " + polygonfile + " -crop_to_cutline -of GTiff " + inputfile + " " + extractedfile
            logging.debug(command)
            # print command
            os.system(command)
            # calculate color
            # shrink to 1x1 and find value
            # logging.debug( string.join(["convert", "-quiet", os.path.abspath(extractedfile), "-resize", "1x1","txt:-"]) )
            # pixelvalue = subprocess.Popen(["convert", "-quiet", os.path.abspath(extractedfile), "-resize", "1x1","txt:-"], stdout=subprocess.PIPE).communicate()[0]
            # pattern = re.compile(r"0,0: \(([\s0-9]*),([\s0-9]*),([\s0-9]*).*")
            # values = pattern.findall(pixelvalue)
            extractedpath = os.path.abspath(extractedfile)
            # gdalwarp may silently fail; skip polygons with no extracted raster
            if os.path.exists(extractedpath) == False:
                continue
            values = average_color(extractedpath)
            if len(values) > 0:
                red = int(values[0])
                green = int(values[1])
                blue = int(values[2])
                nearest = 100000
                nearestcolor = []
                nearestcolorindex = -1

                # NOTE(review): parse_args() is re-run for every polygon;
                # could be hoisted out of the loop.
                args = parser.parse_args()
                basecolors = args.vectorize_config['basecolors']

                # Nearest base color by squared RGB distance.
                for i, color in enumerate(basecolors):
                    dred = (color[0] - red) * (color[0] - red)
                    dgreen = (color[1] - green) * (color[1] - green)
                    dblue = (color[2] - blue) * (color[2] - blue)
                    dist = dred + dgreen + dblue
                    if dist < nearest:
                        nearest = dist
                        nearestcolor = color
                        nearestcolorindex = i
                # only add if NOT paper
                if nearestcolor != basecolors[0]:
                    # check for dots
                    circle_data = cv_feature_detect(extractedfile)
                    # add to array
                    polygonfiles.append([polygonfile, nearestcolorindex, circle_data])
                else:
                    logging.debug("Ignored (paper color): " + polygonfilename + "\n")
            else:
                logging.debug("Ignored (regex match error): " + polygonfilename + "\n")

    # Pass 2: copy every feature of each surviving chunk shapefile into the
    # consolidated output layer, adding color/dot/cross/centroid attributes.
    for files in polygonfiles:
        # 3 open the input data source and get the layer
        tempfile = files[0] #dir_base_name + '-tmp-' + str(currentsubset) + '-traced.shp'
        inDS = driver.Open(tempfile, 0) #shows cover at given points
        if inDS is None:
            print 'Could not open temporary shapefile'
            break
        inLayer = inDS.GetLayer()

        # 7 loop through the input features
        inFeature = inLayer.GetNextFeature()
        while inFeature:
            # create a new feature
            outFeature = ogr.Feature(featureDefn) #using featureDefn created in step 6

            # set the geometry
            geom = inFeature.GetGeometryRef()
            outFeature.SetGeometry(geom) #move it to the new feature

            DN = inFeature.GetField('DN')
            outFeature.SetField('DN', DN ) #move it to the new feature

            outFeature.SetField('Color', int(files[1]) )

            outFeature.SetField('DotCount', int(files[2]["count"]) )

            outFeature.SetField('DotType', int(files[2]["is_outline"]) )

            outFeature.SetField('CrossCount', int(files[2]["cross_count"]) )

            outFeature.SetField('CrossData', str(files[2]["cross_data"]) )

            # Reproject the polygon centroid to WGS84 for the lat/lon fields.
            source_srs = osr.SpatialReference()
            source_srs.ImportFromEPSG(3785) # NOTE: notice this is hardcoded

            target_srs = osr.SpatialReference()
            target_srs.ImportFromEPSG(4326) # NOTE: notice this is hardcoded

            transform = osr.CoordinateTransformation(source_srs, target_srs)

            centroid = geom.Centroid()

            centroid.Transform(transform)

            outFeature.SetField('CentroidY', centroid.GetY())
            outFeature.SetField('CentroidX', centroid.GetX())

            # outFeature.SetField('circle_count', files[2]["circle_count"])
            # outFeature.SetField('circle_type', files[2]["is_outline"])
            # add the feature to the output layer
            outLayer.CreateFeature(outFeature)

            # destroy the output feature
            outFeature.Destroy()

            # destroy the input feature and get a new one
            inFeature.Destroy()
            inFeature = inLayer.GetNextFeature()

        # close the data sources
        inDS.Destroy()

    outDS.Destroy() #flush out the last changes here

    print ""
    print "Applying projection file to result..."
    print "-------------------------------------"
    os.system("cp " + dir_base_name + ".prj " + dir_base_name + "-traced.prj")
Exemplo n.º 16
0
def thresholdize(inputfile, dir_base_name):
    """Create a thresholded, georeferenced working copy of ``inputfile``.

    Pipeline (each external step is skipped when its output file already
    exists, so a partially completed run can resume):

      1. GIMP batch mode (Script-Fu): brightness/contrast then a
         black/white threshold -> ``<dir_base_name>-threshold-tmp.tif``
      2. ``gdalinfo``: scrape the Upper Left / Lower Right corner
         coordinates from the original image's report.
      3. ``gdal_translate``: stamp those corners onto the threshold file
         as WGS84 lat/long -> ``<dir_base_name>-wsg-tmp.tif``
      4. ``gdalwarp``: reproject EPSG:4326 -> EPSG:3785 (spherical
         Mercator) -> ``<dir_base_name>-gdal-tmp.tif``

    NOTE(review): commands are built by string concatenation and run via
    os.system, so paths containing spaces or quotes will break, and this
    is shell-injection prone -- confirm inputs are trusted.
    """
    # Threshold settings come from the parsed CLI config, not parameters.
    args = parser.parse_args()
    brightness = args.vectorize_config['brightness']
    contrast = args.vectorize_config['contrast']
    thresholdblack = args.vectorize_config['thresholdblack']
    thresholdwhite = args.vectorize_config['thresholdwhite']
    thresholdfile = dir_base_name + "-threshold-tmp.tif"
    gimp_path = args.gimp_path

    print "\n\n"
    print "Thresholdizing:"
    print "---------------"
    print inputfile + " into threshold file: " + thresholdfile

    # Script-Fu fragments: brightness/contrast pass, then threshold pass.
    contraststring = '(gimp-brightness-contrast drawable ' + str(brightness) + ' ' + str(contrast) + ')'
    thresholdstring = '(gimp-threshold drawable ' + str(thresholdblack) + ' ' + str(thresholdwhite) + ')'
    # Full batch program: load the TIFF, clear any selection, apply both
    # passes, save to thresholdfile, then delete the in-memory image.
    gimpcommand = '(let* ((image (car (file-tiff-load RUN-NONINTERACTIVE "' + inputfile + '" "' + inputfile + '"))) (drawable (car (gimp-image-get-layer-by-name image "Background")))) (gimp-selection-none image) ' + contraststring + ' ' + thresholdstring + ' (gimp-file-save RUN-NONINTERACTIVE image drawable "' + thresholdfile + '" "' + thresholdfile + '") (gimp-image-delete image))'

    # Step 1: run GIMP headless (-i) only when the output is not cached.
    if (not os.path.isfile(thresholdfile)):
        command = gimp_path + ' -i -b \'' + gimpcommand + '\' -b \'(gimp-quit 0)\''
        logging.debug(command)
        # print command
        os.system(command)

    outputwsg = dir_base_name + "-wsg-tmp.tif"
    outputgdal = dir_base_name + "-gdal-tmp.tif"

    # first get geotiff data from original
    logging.debug( string.join(["gdalinfo", os.path.abspath(inputfile)]) )
    geoText = subprocess.Popen(["gdalinfo", os.path.abspath(inputfile)], stdout=subprocess.PIPE).communicate()[0]
    # Capture (W, N) from the "Upper Left" line and (E, S) from the
    # "Lower Right" line, which sits three lines further down.
    pattern = re.compile(r"Upper Left\s*\(\s*([0-9\-\.]*),\s*([0-9\-\.]*).*\n.*\n.*\nLower Right\s*\(\s*([0-9\-\.]*),\s*([0-9\-\.]*).*")
    geoMatch = pattern.findall(geoText)
    # print pattern
    print "\n"
    print "Geodata obtained:"
    print "-----------------"
    print "W", geoMatch[0][0]
    print "N", geoMatch[0][1]
    print "E", geoMatch[0][2]
    print "S", geoMatch[0][3]
    print "\n"

    # NOTE(review): geoMatch[0] raises IndexError when the regex finds no
    # match (e.g. gdalinfo failed) -- there is no error handling here.
    W = geoMatch[0][0]
    N = geoMatch[0][1]
    E = geoMatch[0][2]
    S = geoMatch[0][3]

    print "Applying to destination:"
    print "------------------------"
    # print outputgdal
    # Step 3: copy the threshold image, declaring WGS84 lat/long and the
    # scraped corner coordinates (-a_ullr expects W N E S order).
    if (not os.path.isfile(outputwsg)):
        command = 'gdal_translate -a_srs "+proj=latlong +datum=WGS84" -of GTiff -co "INTERLEAVE=PIXEL" -a_ullr ' + W + ' ' + N + ' ' + E + ' ' + S + ' ' + thresholdfile + ' ' + outputwsg
        logging.debug(command)
        # print command
        os.system(command)

    print ""
    # Step 4: warp to spherical Mercator with bilinear resampling.
    if (not os.path.isfile(outputgdal)):
        command = 'gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3785 -r bilinear ' + outputwsg + ' ' + outputgdal
        logging.debug(command)
        # print command
        os.system(command)
Exemplo n.º 17
0
def main():
    """Train a candidate model and dump its fitness for the NAS driver.

    Builds the model/criterion via the project's ``Model`` factory, wraps
    the model with FLOP-counting hooks, trains and evaluates it for
    ``args.nepochs`` epochs, then pickles ``[error, n_flops, num_params]``
    to ``output_file/output<genome_id - 1>.pkl``.  ``error`` is
    100 - mean test accuracy over the last 3 epochs (to smooth noise).
    """
    # Parse the arguments and make the run reproducible.
    args = parser.parse_args()
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)

    # Create the model + loss and attach FLOP-counting instrumentation.
    models = Model(args)
    model, criterion, num_params = models.setup()
    model = calculate_flops.add_flops_counting_methods(model)

    # Data loading.
    dataloader = Dataloader(args)
    loaders = dataloader.create()

    # Trainer runs the optimization loop; Tester the validation pass.
    trainer = Trainer(args, model, criterion)
    tester = Tester(args, model, criterion)

    acc_test_list = []
    acc_best = 0
    # Bug fix: bind n_flops before the loop so the fitness dump below
    # cannot raise NameError when args.nepochs == 0.
    n_flops = float('nan')
    for epoch in range(args.nepochs):

        start_time_epoch = time.time()
        if epoch == 0:
            # Measure FLOPs during the first epoch only.
            model.start_flops_count()
        loss_train, acc_train = trainer.train(epoch, loaders)
        loss_test, acc_test = tester.test(epoch, loaders)
        acc_test_list.append(acc_test)

        time_elapsed = np.round((time.time() - start_time_epoch), 2)
        if epoch == 0:
            # Average multiply-accumulate cost, reported in MFLOPs.
            n_flops = (model.compute_average_flops_cost() / 1e6 / 2)
        # Track the best test accuracy found so far.
        if acc_test > acc_best:
            acc_best = acc_test

        # Abort early if training diverged.
        if np.isnan(np.average(loss_train)):
            break

        print("Epoch {:d}:, test error={:0.2f}, FLOPs={:0.2f}M, n_params={:0.2f}M, {:0.2f} sec"
              .format(epoch, 100.0-acc_test, n_flops, num_params/1e6, time_elapsed))

    # Fitness vector consumed by the evolutionary search driver.
    error = 100 - np.mean(acc_test_list[-3:])
    fitness = [error, n_flops, num_params]
    with open("output_file/output%d.pkl" % int(args.genome_id - 1), "wb") as f:
        pickle.dump(fitness, f)
Exemplo n.º 18
0
def main():
    """Run ``process_file`` over every TIFF found under the input path.

    Fixes the inconsistent indentation of the loop body (it was indented
    3 spaces relative to the ``for`` header instead of 4).
    """
    args = parser.parse_args()
    for i, (ff, inputfile) in enumerate(list_tiffs(args.inputfile)):
        process_file(ff, inputfile)
        # Running count so long batch jobs show progress.
        print('Processed %d file(s)' % (i + 1))
Exemplo n.º 19
0
def main():
    """Train (or evaluate) a SlowFast network on the configured dataset.

    Resolves the class count from ``args.dataset``, optionally resumes
    from a checkpoint, builds train/val loaders, then runs the epoch loop
    with a warm-up multi-step LR schedule, checkpointing the best top-1.
    """
    global args, best_prec1

    args = parser.parse_args()

    # Checkpoints/records are written under ./record.
    if not os.path.exists('./record'):
        os.mkdir('./record')

    # Map the dataset name to its number of action classes.
    if args.dataset == 'ucf101':
        num_class = 101
    elif args.dataset == 'hmdb51':
        num_class = 51
    elif args.dataset == 'kinetics':
        num_class = 400
    elif args.dataset == 'sthsth':
        num_class = 174
    else:
        raise ValueError('Unknown dataset ' + args.dataset)

    model = SlowFastNet(num_class)
    train_augmentation = get_augmentation('RGB', input_size)
    model = torch.nn.DataParallel(model).cuda()

    # Optionally resume training (restores epoch counter and best score).
    args.start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            # Bug fix: report the checkpoint actually loaded; this message
            # previously formatted args.evaluate instead of args.resume.
            print(("=> loaded checkpoint '{}' (epoch {})"
                   .format(args.resume, checkpoint['epoch'])))
        else:
            print(("=> no checkpoint found at '{}'".format(args.resume)))

    cudnn.benchmark = True

    # Normalize per-channel, then fold stacked 2D frames into a 3D clip.
    normalize = torchvision.transforms.Compose([GroupNormalize(input_mean, input_std), f2Dt3D()])

    train_loader = torch.utils.data.DataLoader(
        VideoDataset(args.root_path, args.train_list,
                     transform=torchvision.transforms.Compose([
                         train_augmentation,
                         Stack(roll=False),
                         ToTorchFormatTensor(div=True),
                         normalize,
                     ]), mode='train', T=args.T, tau=args.tau, dense_sample=not args.no_dense_sample),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        VideoDataset(args.root_path, args.val_list,
                     transform=torchvision.transforms.Compose([
                         GroupScale(int(scale_size)),
                         GroupCenterCrop(input_size),
                         Stack(roll=False),
                         ToTorchFormatTensor(div=True),
                         normalize,
                     ]), mode='test', T=args.T, tau=args.tau, dense_sample=not args.no_dense_sample),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # Define loss function (criterion), optimizer and LR schedule.
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Step decay at epochs 20/30/40 with warm-up; last_epoch lets the
    # schedule pick up where a resumed run left off.
    scheduler = WarmUpMultiStepLR(optimizer, [20, 30, 40], 0.1, last_epoch=args.start_epoch - 1)

    # The schedule from the original paper (cosine over total iterations)
    # is unused because the total iteration count is hard to estimate:
    # max_step = len(train_loader)*args.epochs
    # lr_lambda = lambda step: 0.5 * args.lr* ((np.cos(step / max_step * np.pi)) + 1)
    # scheduler = torch.nn.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[lr_lambda])
    if args.evaluate:
        validate(val_loader, model, criterion, 0)
        return

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()
        print('Epoch {}/{}'.format(epoch + 1, args.epochs))
        # Train for one epoch.
        train(train_loader, model, criterion, optimizer, epoch)
        # Evaluate on the validation set.
        prec1 = validate(val_loader, model, criterion, epoch)

        # Remember best prec@1 and save checkpoint.
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, epoch + 1)
Exemplo n.º 20
0
def main():
    """Two-stream evaluation: fuse RGB and RGBDiff TSN predictions.

    Loads the best RGB and RGBDiff checkpoints, runs both models over the
    validation list in lockstep, sums their logits, and reports overall
    and per-class top-1/top-5 accuracy.

    NOTE(review): zipping the two DataLoaders assumes both yield samples
    in identical order; both are built with shuffle=False,
    random_shift=False and test_mode=True, which is what keeps the
    pairing valid -- confirm if those flags ever change.
    """
    args = parser.parse_args()

    torch.backends.cudnn.benchmark = True

    # Map the dataset name to its number of action classes.
    if args.dataset == 'ucf101':
        num_class = 101
    elif args.dataset == 'hmdb51':
        num_class = 51
    elif args.dataset == 'kinetics':
        num_class = 400
    elif args.dataset =='sthsth':
        num_class=174
    else:
        raise ValueError('Unknown dataset' + args.dataset)

    # Two TSN models sharing the same backbone config, one per modality.
    RGBmodel = TSN(num_class, args.num_segments, 'RGB',
                base_model=args.arch, consensus_type=args.consensus_type,
                dropout=args.dropout, partial_bn=not args.nopartial_bn).to(device)
    RGBDiffmodel = TSN(num_class, args.num_segments, 'RGBDiff',
               base_model=args.arch, consensus_type=args.consensus_type,
               dropout=args.dropout, partial_bn=not args.nopartial_bn).to(device)
    # RGBDiffmodel = TSN(num_class, args.num_segments, 'Flow',
    #             base_model=args.arch, consensus_type=args.consensus_type,
    #             dropout=args.dropout, partial_bn=not args.nopartial_bn).to(device)
    # Restore the best checkpoint of each stream.
    checkpoint = torch.load('./record/RGBbest.pth')
    RGBmodel.load_state_dict(checkpoint['state_dict'])
    checkpoint = torch.load('./record/RGBDiffbest.pth')
    RGBDiffmodel.load_state_dict(checkpoint['state_dict'])
    # checkpoint = torch.load('./record/Flow/Flowbest.pth')
    # RGBDiffmodel.load_state_dict(checkpoint['state_dict'])

    # Preprocessing geometry/statistics come from the model definition.
    crop_size = RGBmodel.crop_size
    scale_size = RGBmodel.scale_size
    input_mean = RGBmodel.input_mean
    input_std = RGBmodel.input_std

    RGB_loader = torch.utils.data.DataLoader(
        TSNDataSet(args.root_path, args.val_list, num_segments=args.num_segments,
                   new_length=1,
                   modality='RGB',
                   image_tmpl="{:05d}.jpg" ,
                   random_shift=False,
                   test_mode=True,
                   On_Video=True,
                   interval=1,
                   transform=torchvision.transforms.Compose([
                       GroupScale(int(scale_size)),
                       GroupCenterCrop(crop_size),
                       Stack(roll=args.arch == 'BNInception'),
                       ToTorchFormatTensor(div=args.arch != 'BNInception'),
                       GroupNormalize(input_mean, input_std)
                   ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # Same clips, but 5-frame RGB differences (no normalization here).
    RGBDiff_loader = torch.utils.data.DataLoader(
        TSNDataSet(args.root_path, args.val_list, num_segments=args.num_segments,
                   new_length=5,
                   modality='RGBDiff',
                   image_tmpl="{:05d}.jpg" if args.modality in ["RGB","RGBDiff"]  else "{}/{}/frame{:06d}.jpg",
                   test_mode=True,
                   random_shift=False,
                   On_Video=True,
                   interval=2,
                   transform=torchvision.transforms.Compose([
                       GroupScale(int(scale_size)),
                       GroupCenterCrop(crop_size),
                       Stack(roll=args.arch == 'BNInception'),
                       ToTorchFormatTensor(div=args.arch != 'BNInception')
                   ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)


    #actually this is the Flow loader.i am lazy to change the name.
    # RGBDiff_loader = torch.utils.data.DataLoader(
    #     TSNDataSet('/home/qx/project/data/UCF101/tvl1_flow/', args.val_list, num_segments=args.num_segments,
    #                new_length=5,
    #                modality='Flow',
    #                image_tmpl="{}/{}/frame{:06d}.jpg",
    #                random_shift=False,
    #                test_mode=True,
    #                On_Video=False,
    #                interval=2,
    #                transform=torchvision.transforms.Compose([
    #                    GroupScale(int(scale_size)),
    #                    GroupCenterCrop(crop_size),
    #                    Stack(roll=args.arch == 'BNInception'),
    #                    ToTorchFormatTensor(div=args.arch != 'BNInception'),
    #                    GroupNormalize(input_mean, input_std)
    #                ])),
    #     batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True)

    RGBmodel.eval()
    RGBDiffmodel.eval()
    epoch_prec1 = 0
    epoch_prec5 = 0

    # Per-class sample counts and per-class top-1/top-5 hit counts.
    class_num = np.array([0] * num_class)
    class_prec1 = np.array([0] * num_class)
    class_prec5 = np.array([0] * num_class)

    with torch.no_grad():
        # Iterate both loaders in lockstep; the RGBDiff targets are
        # discarded because both loaders read the same val list.
        for (RGBdata, target), (RGBDiffdata, _) in zip(RGB_loader, RGBDiff_loader):
            #print(RGBdata.shape,RGBDiffdata.shape)
            target = target.to(device)
            RGBDiffdata = RGBDiffdata.to(device)
            RGBdata = RGBdata.to(device)
            RGBoutput = RGBmodel(RGBdata)
            RGBDiffoutput = RGBDiffmodel(RGBDiffdata)
            #print(RGBoutput.shape,RGBDiffoutput.shape)
            # Late fusion: sum the two streams' logits.
            output = RGBoutput+RGBDiffoutput

            # _,pred = output.topk(5, 1, True, True)
            # pred=pred.cpu().numpy()
            # target=target.cpu().numpy()
            # for i in range(args.batch_size):
            #     pred_name=index2name(pred[i],'./raw/classInd.txt')
            #     true_name=index2name(target[i:i+1],'./raw/classInd.txt')
            #     print('pred name:',pred_name,'true name:',true_name)


            # Batch accuracy plus per-class hit/sample tallies.
            (prec1, prec5), (class_prec1_t, class_prec5_t), class_num_t \
                = class_accuracy(output.data, target, num_class, topk=(1, 5))

            class_num += class_num_t
            class_prec1 += class_prec1_t
            class_prec5 += class_prec5_t

            #prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            # Weight batch accuracy by batch size for a dataset average.
            epoch_prec1 += prec1.item() * target.size(0)
            epoch_prec5 += prec5.item() * target.size(0)


        epoch_prec1 = 1.0 * epoch_prec1 / len(RGBDiff_loader.dataset)
        epoch_prec5 = 1.0 * epoch_prec5 / len(RGBDiff_loader.dataset)

    print("Accuracy top1: {} top5:{}".format(epoch_prec1, epoch_prec5))

    # Per-class top-1 accuracy, worst to best, with class names.
    sorted_name=index2name(np.argsort(class_prec1 / class_num),'./raw/classInd.txt')
    sorted_score=np.sort(class_prec1 / class_num)
    for i in range(num_class):
        print(sorted_name[i],sorted_score[i])

    print()

    # Per-class top-5 accuracy, worst to best.
    sorted_name = index2name(np.argsort(class_prec5 / class_num), './raw/classInd.txt')
    sorted_score = np.sort(class_prec5 / class_num)
    for i in range(num_class):
        print(sorted_name[i], sorted_score[i])
Exemplo n.º 21
0
def main():
    """Train a TSN model on the configured dataset.

    Builds the model for ``args.modality``, optionally resumes from a
    checkpoint, trains with per-layer-group SGD policies, saves a
    checkpoint every epoch ('best.pth' when validation improves), decays
    the LR after 5 epochs without improvement, and stops after 10.
    """
    global args, best_prec1
    args = parser.parse_args()

    # Checkpoints are written under ./record.
    if not os.path.exists('./record'):
        os.mkdir('./record')

    torch.backends.cudnn.benchmark = True

    # Map the dataset name to its number of action classes.
    if args.dataset == 'ucf101':
        num_class = 101
    elif args.dataset == 'hmdb51':
        num_class = 51
    elif args.dataset == 'kinetics':
        num_class = 400
    elif args.dataset == 'sthsth':
        num_class = 174
    else:
        raise ValueError('Unknown dataset' + args.dataset)

    model = TSN(num_class,
                args.num_segments,
                args.modality,
                base_model=args.arch,
                consensus_type=args.consensus_type,
                dropout=args.dropout,
                partial_bn=not args.nopartial_bn)

    # Multi-GPU via DataParallel; otherwise place on the default device.
    if args.gpus > 1:
        model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    else:
        model = model.to(device)

    # from torchsummary import summary
    # summary(model, input_size=(54, 224, 224))

    # Preprocessing geometry/statistics come from the model definition.
    crop_size = model.crop_size
    scale_size = model.scale_size
    input_mean = model.input_mean
    input_std = model.input_std
    # Per-layer-group optimizer policies (lr multipliers etc.).
    policies = model.get_optim_policies()
    train_augmentation = model.get_augmentation()  # includes aspect-ratio jitter and horizontal flip
    start_epoch = 0

    # Optionally resume (restores epoch counter and best score).
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print(("=> loaded checkpoint {} at epoch {}"). \
                  format(args.resume, start_epoch + 1))
        else:
            print("=> no checkpoint found at {}".format(args.resume))

    # RGBDiff computes its own statistics, so skip mean/std normalization.
    if args.modality != 'RGBDiff':
        normalize = GroupNormalize(input_mean, input_std)
    else:
        normalize = IdentityTransform()

    # One frame per segment for RGB; 5 stacked frames otherwise.
    if args.modality == 'RGB':
        data_length = 1
    else:
        data_length = 5

    train_loader = torch.utils.data.DataLoader(TSNDataSet(
        args.root_path,
        args.train_list,
        num_segments=args.num_segments,
        new_length=data_length,
        modality=args.modality,
        On_Video=args.On_Video,
        interval=args.interval,
        image_tmpl="{:05d}.jpg"
        if args.modality in ["RGB", "RGBDiff"] else "{}/{}/frame{:06d}.jpg",
        transform=torchvision.transforms.Compose([
            train_augmentation,
            Stack(roll=args.arch == 'BNInception'),
            ToTorchFormatTensor(div=args.arch != 'BNInception'),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(TSNDataSet(
        args.root_path,
        args.val_list,
        num_segments=args.num_segments,
        new_length=data_length,
        modality=args.modality,
        On_Video=args.On_Video,
        interval=args.interval,
        image_tmpl="{:05d}.jpg"
        if args.modality in ["RGB", "RGBDiff"] else "{}/{}/frame{:06d}.jpg",
        random_shift=False,
        transform=torchvision.transforms.Compose([
            GroupScale(int(scale_size)),
            GroupCenterCrop(crop_size),
            Stack(roll=args.arch == 'BNInception'),
            ToTorchFormatTensor(div=args.arch != 'BNInception'),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    '''
    train_loader = torch.utils.data.DataLoader(
        mydataset(),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        mydataset(),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    '''

    criterion = torch.nn.CrossEntropyLoss().to(device)

    for group in policies:
        print('group: {} has {} params lr_mult: {}'.format(
            group['name'], len(group['params']), group['lr_mult']))

    # SGD over the policy groups; each group carries its own lr_mult.
    optimizer = optim.SGD(policies,
                          args.lr,
                          momentum=0.9,
                          weight_decay=args.weight_decay)

    if args.evaluate:
        test(val_loader, model, num_class)
        return

    best_epoch = start_epoch
    for epoch in range(start_epoch, args.epochs):

        print('Epoch {}/{}'.format(epoch + 1, args.epochs))
        train(args.record_path, train_loader, model, criterion, optimizer,
              epoch, args.clip_gradient)
        prec1 = validate(args.record_path, val_loader, model, criterion, epoch)

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)

        # Best checkpoints overwrite '<modality>best.pth'; others are
        # kept per-epoch.
        if is_best:
            filename = args.record_path + args.modality + 'best.pth'
            best_epoch = epoch
        else:
            filename = args.record_path + args.modality + str(epoch) + '.pth'

        torch.save(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1
            }, filename)
        # Early stopping after 10 epochs without improvement; decay the
        # LR (every epoch) once 5 epochs have passed without improvement.
        if epoch - best_epoch > 10:
            return
        elif epoch - best_epoch > 5:
            print('epoch {} best epoch{}'.format(epoch + 1, best_epoch + 1))
            args.lr = utils.adjust_learning_rate(args.lr, optimizer)