        print('Start exporting data')
        front_rear_parking_radar, driving_assistance_image, driving_mode, rels_motorcycle_parking_radar, rels_motorcycle_driving_assistance, rels_motorcycle_driving_mode = self.read_nodes()

        # Export each node list to its dictionary file, one term per line
        # ('w' mode already truncates, so no explicit clearing is needed).
        with open('../dict/front_rear_parking_radar.txt', 'w', encoding='utf-8') as f_1:
            f_1.write('\n'.join(front_rear_parking_radar))

        with open('../dict/driving_assistance_image.txt', 'w', encoding='utf-8') as f_2:
            f_2.write('\n'.join(driving_assistance_image))

        with open('../dict/driving_mode.txt', 'w', encoding='utf-8') as f_3:
            f_3.write('\n'.join(driving_mode))




if __name__ == '__main__':
    sys.stdout = logger.Logger()
    handler = AssistOperateGraph()
    handler.create_graphnodes()
    handler.create_graphrels()
    handler.export_data()
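A minimal read-back sketch to accompany the export above (not part of the original class); it assumes the one-term-per-line layout that export_data writes to the ../dict/ files.

# Hypothetical companion snippet: load one exported dictionary back into a set.
with open('../dict/driving_mode.txt', encoding='utf-8') as f:
    driving_mode_terms = {line.strip() for line in f if line.strip()}
print('loaded %d driving_mode terms' % len(driving_mode_terms))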
def main():
    parser = argparse.ArgumentParser()

    # GSN settings
    parser.add_argument('--layers', type=int,
                        default=3)  # number of hidden layers
    parser.add_argument('--walkbacks', type=int,
                        default=5)  # number of walkbacks
    parser.add_argument('--hidden_size', type=int, default=1500)
    parser.add_argument('--hidden_act', type=str, default='tanh')
    parser.add_argument('--visible_act', type=str, default='sigmoid')

    # training
    parser.add_argument(
        '--cost_funct', type=str,
        default='binary_crossentropy')  # the cost function for training
    parser.add_argument('--n_epoch', type=int, default=500)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument(
        '--save_frequency', type=int,
        default=5)  #number of epochs between parameters being saved
    parser.add_argument('--early_stop_threshold', type=float, default=0.9995)
    parser.add_argument('--early_stop_length', type=int,
                        default=30)  #the patience number of epochs

    # noise
    parser.add_argument('--hidden_add_noise_sigma', type=float,
                        default=2)  #default=2
    parser.add_argument('--input_salt_and_pepper', type=float,
                        default=0.4)  #default=0.4

    # hyper parameters
    parser.add_argument('--learning_rate', type=float, default=0.25)
    parser.add_argument('--momentum', type=float, default=0.5)
    parser.add_argument('--annealing', type=float, default=0.995)
    parser.add_argument('--noise_annealing', type=float, default=1)

    # data
    parser.add_argument('--dataset', type=str, default='MNIST')
    parser.add_argument('--data_path', type=str, default='../data/')
    parser.add_argument('--classes', type=int, default=10)
    parser.add_argument('--output_path', type=str, default='../outputs/gsn/')

    # argparse does not deal with booleans, so switches are ints (see the bool-cast sketch after this example)
    parser.add_argument('--vis_init', type=int, default=0)
    parser.add_argument('--noiseless_h1', type=int, default=1)
    parser.add_argument('--input_sampling', type=int, default=1)
    parser.add_argument('--test_model', type=int, default=0)
    parser.add_argument('--continue_training', type=int, default=0)  #default=0

    args = parser.parse_args()

    ########################################
    # Initialization things with arguments #
    ########################################
    outdir = args.output_path + "/" + args.dataset + "/"
    data.mkdir_p(outdir)
    args.output_path = outdir

    # Create the logger
    logger = log.Logger(outdir)
    logger.log("---------CREATING GSN------------\n\n")
    logger.log(args)

    # See if we should load args from a previous config file (during testing)
    config_filename = outdir + 'config'
    if args.test_model and 'config' in os.listdir(outdir):
        config_vals = load_from_config(config_filename)
        for CV in config_vals:
            logger.log(CV)
            if CV.startswith('test'):
                logger.log('Do not override testing switch')
                continue
            try:
                exec('args.' + CV, globals(), locals())
            except Exception:
                exec('args.' + CV.split('=')[0] + "='" + CV.split('=')[1] + "'",
                     globals(), locals())
    else:
        # Save the current configuration
        # Useful for logs/experiments
        logger.log('Saving config')
        with open(config_filename, 'w') as f:
            f.write(str(args))

    ######################################
    # Load the data, train = train+valid #
    ######################################
    if args.dataset.lower() == 'mnist':
        (train_X,
         train_Y), (valid_X,
                    valid_Y), (test_X,
                               test_Y) = data.load_mnist(args.data_path)
        train_X = numpy.concatenate((train_X, valid_X))
        train_Y = numpy.concatenate((train_Y, valid_Y))
    else:
        raise AssertionError(
            "Dataset not recognized. Please try MNIST, or implement your own data processing method in data_tools.py"
        )

    # transfer the datasets into theano shared variables
    train_X, train_Y = data.shared_dataset((train_X, train_Y), borrow=True)
    valid_X, valid_Y = data.shared_dataset((valid_X, valid_Y), borrow=True)
    test_X, test_Y = data.shared_dataset((test_X, test_Y), borrow=True)

    ##########################
    # Initialize the new GSN #
    ##########################
    gsn = GSN(train_X, valid_X, test_X, vars(args), logger)
    #     gsn.train()

    gsn.load_params('gsn_params_mnist.pkl')
    gsn.gen_10k_samples()
    # parzen
    print('Evaluating parzen window')
    import utils.likelihood_estimation as ll
    ll.main(0.20, 'mnist', '../data/', 'samples.npy')
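As the "argparse does not deal with booleans" comment above notes, the switch-like options are parsed as ints. A minimal, hedged sketch of one way to cast them to booleans right after parsing (the flag names are the ones defined above; this is not part of the original script):

args = parser.parse_args()
# Cast the int-valued switches defined above to real booleans.
for flag in ('vis_init', 'noiseless_h1', 'input_sampling',
             'test_model', 'continue_training'):
    setattr(args, flag, bool(getattr(args, flag)))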
Example #3
import utils.logger as logger
import sendrequest as req
import random
import string
import sys

sys.path.append('../')

from utils.db import DatabaseUpdate
from utils.config import get_value
from core.login import APILogin

dbupdate = DatabaseUpdate()
api_logger = logger.Logger()
api_login = APILogin()


def generate_list(length, type):
    # Generate different possible param value for brute force
    lis = []
    if type == 'int':
        length = '%0' + str(length) + 'd'
        lis = [length % x for x in range(50)]
    elif type == 'str':
        for a in range(1, 50):
            lis += [''.join(
                random.choice(string.ascii_letters) for i in range(length))]
    return lis
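A small usage sketch for generate_list (illustration only, not in the original module): type 'int' yields zero-padded numeric strings and type 'str' yields random letter strings of the requested length.

print(generate_list(3, 'int'))   # ['000', '001', ..., '049']  (50 values)
print(generate_list(5, 'str'))   # 49 random 5-letter strings, e.g. 'kQzPa'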


def brute_force(url, method, headers, body, attack_params, scanid):
Example #4
                        dest="verbose",
                        action="store_true",
                        help="Verbose mode")
    parser.add_argument(
        "-b",
        "--only-blocks",
        dest="only_blocks",
        action="store_true",
        help="Only remove PADDING, PICTURE and SEEKTABLE blocks")
    parser.add_argument("-l",
                        "--list-tags",
                        dest="list_tags",
                        action="store_true",
                        help="Get a list of all tags across all files")
    args = parser.parse_args()
    LOGGER = logger.Logger(args.verbose)

    # Sanity checks
    common.ensure_exist(["metaflac"])
    if args.input.exists() is False:
        common.abort(parser.format_help())

    # Get files list
    files = common.walk_directory(args.input.resolve(),
                                  lambda x: x.suffix == ".flac")
    queue = common.as_queue(files)
    LOGGER.log(
        f"{common.COLOR_WHITE}[+] {len(files)} file{'s' if len(files) != 1 else ''} to consider"
    )

    if args.list_tags is True:
Example #5
File: run_stage3.py Project: wymGAKKI/saps
import torch, sys
sys.path.append('.')
from options import stage3_opts
from utils import logger, recorders
from datasets import custom_data_loader
from models import custom_model, solver_utils, model_utils
from options import run_model_opts
import train_stage3 as train_utils
import test_stage3 as test_utils

args = run_model_opts.RunModelOpts().parse()
args = stage3_opts.TrainOpts().parse()  # this second parse replaces the RunModelOpts arguments above
log = logger.Logger(args)


#### CUDA_VISIBLE_DEVICES=0 python main_stage3.py --retrain "/home/wym/code/SDPS-Net/data/models/LCNet_CVPR2019.pth.tar" --retrain_s2 "/home/wym/code/SDPS-Net/data/models/NENet_CVPR2019.pth.tar"
def main(args):
    model = custom_model.buildModelStage3(args)

    recorder = recorders.Records(args.log_dir)
    val_loader = custom_data_loader.benchmarkLoader(args)
    #train_loader, val_loader = custom_data_loader.reflectanceDataloader(args)
    test_utils.testOnBm(args, 'val', val_loader, model, log, 1, recorder)
    log.plotCurves(recorder, 'val')


if __name__ == '__main__':
    torch.manual_seed(args.seed)
    main(args)
Example #6
def main():
    TrainImgLoader = torch.utils.data.DataLoader(
        data_inuse,
        batch_size=batch_size, shuffle=True, num_workers=int(worker_mul * batch_size),
        drop_last=True, worker_init_fn=_init_fn, pin_memory=True)
    log = logger.Logger(args.savemodel, name=args.logname)
    start_full_time = time.time()
    global total_iters

    # training loop
    for batch_idx, databatch in enumerate(TrainImgLoader):
        if batch_idx > args.niter: break
        if 'expansion' in args.stage:
            imgL_crop, imgR_crop, flowl0,imgAux,intr, imgoL, imgoR, occp  = databatch
        else:
            imgL_crop, imgR_crop, flowl0 = databatch
            imgAux,intr, imgoL, imgoR, occp = None,None,None,None,None
        if batch_idx % 100 == 0:
            adjust_learning_rate(optimizer,total_iters)
        if total_iters < 1000 and not 'expansion' in args.stage:
            # subtract mean
            mean_L.append( np.asarray(imgL_crop.mean(0).mean(1).mean(1)) )
            mean_R.append( np.asarray(imgR_crop.mean(0).mean(1).mean(1)) )
        imgL_crop -= torch.from_numpy(np.asarray(mean_L).mean(0)[np.newaxis,:,np.newaxis, np.newaxis]).float()
        imgR_crop -= torch.from_numpy(np.asarray(mean_R).mean(0)[np.newaxis,:,np.newaxis, np.newaxis]).float()

        start_time = time.time() 
        loss,vis = train(imgL_crop,imgR_crop, flowl0, imgAux,intr, imgoL, imgoR, occp)
        print('Iter %d training loss = %.3f , time = %.2f' %(batch_idx, loss, time.time() - start_time))

        if total_iters %10 == 0:
            log.scalar_summary('train/loss_batch',loss, total_iters)
            log.scalar_summary('train/aepe_batch',vis['AEPE'], total_iters)
        if total_iters %100 == 0:
            log.image_summary('train/left',imgL_crop[0:1],total_iters)
            log.image_summary('train/right',imgR_crop[0:1],total_iters)
            if len(np.asarray(vis['gt']))>0:
                log.histo_summary('train/gt_hist',np.asarray(vis['gt']).reshape(-1,3)[np.asarray(vis['gt'])[:,:,:,-1].flatten().astype(bool)][:,:2], total_iters)
            gu = vis['gt'][0,:,:,0]; gv = vis['gt'][0,:,:,1]
            gu = gu*np.asarray(vis['mask'][0].float().cpu());  gv = gv*np.asarray(vis['mask'][0].float().cpu())
            mask = vis['mask'][0].float().cpu()
            log.image_summary('train/gt0', flow_to_image(np.concatenate((gu[:,:,np.newaxis],gv[:,:,np.newaxis],mask[:,:,np.newaxis]),-1))[np.newaxis],total_iters)
            log.image_summary('train/output2',flow_to_image(vis['output2'][0].transpose((1,2,0)))[np.newaxis],total_iters)
            log.image_summary('train/output3',flow_to_image(vis['output3'][0].transpose((1,2,0)))[np.newaxis],total_iters)
            log.image_summary('train/output4',flow_to_image(vis['output4'][0].transpose((1,2,0)))[np.newaxis],total_iters)
            log.image_summary('train/output5',flow_to_image(vis['output5'][0].transpose((1,2,0)))[np.newaxis],total_iters)
            log.image_summary('train/output6',flow_to_image(vis['output6'][0].transpose((1,2,0)))[np.newaxis],total_iters)
            if 'expansion' in args.stage:
                log.image_summary('train/mid_gt',(1+imgAux[:1,:,:,6]/imgAux[:1,:,:,0]).log() ,total_iters)
                log.image_summary('train/mid',vis['mid'][np.newaxis],total_iters)
                log.image_summary('train/exp',vis['exp'][np.newaxis],total_iters)
            torch.cuda.empty_cache()
        total_iters += 1
        # get global counts                
        with open('%s/iter_counts-%d.txt'%(args.itersave,int(args.logname.split('-')[-1])), 'w') as f:
            f.write('%d'%total_iters)

        if (total_iters + 1)%2000==0:
            #SAVE
            savefilename = args.savemodel+'/'+args.logname+'/finetune_'+str(total_iters)+'.pth'
            save_dict = model.state_dict()
            save_dict = collections.OrderedDict({k:v for k,v in save_dict.items() if ('reg_modules' not in k or 'conv1' in k) and ('grid' not in k) and ('flow_reg' not in k)})
            torch.save({
                'iters': total_iters,
                'state_dict': save_dict,
                'mean_L': mean_L,
                'mean_R': mean_R,
            }, savefilename)
        
    print('full finetune time = %.2f HR' %((time.time() - start_full_time)/3600))
    print(max_epo)
Example #7
import elasticsearch_dsl as esdsl
import json
from pathlib import Path
from datetime import datetime
import time

from get_data.src import s3_utils
from get_data.src import es_utils
from get_data.src import utils_fct
from get_data.src import get_from_db
from conf.cluster_conf import ES_INDEX, ES_HOST_PORT, ES_HOST_IP, BUCKET_NAME
from utils import logger

log = logger.Logger().create(logger_name=__name__)


def _get_img_and_label_to_delete_from_file(label_file):
    d_label = utils_fct.get_label_dict_from_file(label_file)
    if d_label is None:
        return 0, 0
    l_to_delete = utils_fct.remove_label_to_delete_from_dict(d_label)
    l_label_to_delete = [label["label_fingerprint"] for label in l_to_delete]
    l_img_delete = [(label["img_id"], label["s3_key"])
                    for label in l_to_delete]
    return l_img_delete, l_label_to_delete
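A hedged usage sketch for the helper above (the label file path is hypothetical, and the module-level log is assumed to behave like a standard logger): it returns the (img_id, s3_key) pairs and the matching label fingerprints flagged for deletion, or (0, 0) when the file yields no labels.

l_img, l_label = _get_img_and_label_to_delete_from_file("labels.json")
if l_img:
    log.info(f"{len(l_img)} images and {len(l_label)} labels flagged for deletion")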


def _user_ok_for_deletion(nb_of_img_to_delete,
                          delete_local=False,
                          label_only=False):
    ok = None
Example #8
                100. * (batch_idx+1) / len(train_loader), loss.item(),
                train_dice0, train_dice1, train_dice2,
                metrics.T(output, target), metrics.P(output, target), metrics.TP(output, target)))

    logger.scalar_summary('train_loss', float(train_loss), epoch)
    logger.scalar_summary('train_dice0', float(train_dice0), epoch)
    logger.scalar_summary('train_dice1', float(train_dice1), epoch)
    logger.scalar_summary('train_dice2', float(train_dice2), epoch)


if __name__ == '__main__':
    args = config.args
    device = torch.device('cpu' if args.cpu else 'cuda')
    # data info
    train_set = Lits_DataSet(args.crop_size, args.resize_scale, args.dataset_path, mode='train')
    val_set = Lits_DataSet(args.crop_size, args.resize_scale, args.dataset_path, mode='val')
    train_loader = DataLoader(dataset=train_set,batch_size=args.batch_size,num_workers=1, shuffle=True)
    val_loader = DataLoader(dataset=val_set,batch_size=args.batch_size,num_workers=1, shuffle=True)
    # model info
    model = UNet(1, [32, 48, 64, 96, 128], 3, net_mode='3d',conv_block=RecombinationBlock).to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    init_util.print_network(model)
    # model = nn.DataParallel(model, device_ids=[0,1])  # multi-GPU

    logger = logger.Logger('./output/{}'.format(args.save))
    for epoch in range(1, args.epochs + 1):
        common.adjust_learning_rate(optimizer, epoch, args)
        train(model, train_loader, optimizer, epoch, logger)
        val(model, val_loader, epoch, logger)
        torch.save(model, './output/{}/state.pkl'.format(args.save))  # Save model with parameters
        # torch.save(model.state_dict(), './output/{}/param.pkl'.format(args.save))  # Only save parameters
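A hedged companion sketch for the two save styles commented above (checkpoint paths are hypothetical): torch.save(model, ...) stores the full module and needs the class importable at load time, while a state_dict checkpoint is loaded into a freshly constructed model.

# Full-model checkpoint, as saved above (recent PyTorch may need weights_only=False):
model = torch.load('./output/run1/state.pkl')
# Parameters-only checkpoint, the commented-out alternative above:
# model = UNet(1, [32, 48, 64, 96, 128], 3, net_mode='3d', conv_block=RecombinationBlock)
# model.load_state_dict(torch.load('./output/run1/param.pkl'))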
Example #9
else:
    scheduler_warmup = None
    if args.scheduler_type == 'plateau':
        step_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                                    mode='min',
                                                                    factor=0.1,
                                                                    patience=5)
    elif args.scheduler_type == 'step':
        step_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size=args.scheduler_step,
            gamma=args.scheduler_gamma)
    else:
        step_scheduler = None

metrics_logger = logger.Logger(args)
loss_logger = logger.LossLogger(args.model_name)

for epoch in range(1, args.epochs + 1):
    print('======================== {} ========================'.format(epoch))
    for param_group in optimizer.param_groups:
        print('LR: {}'.format(param_group['lr']))

    train_loader.dataset.epoch = epoch
    ################### Train #################
    all_preds, all_targs, all_masks, all_ids, train_loss, train_loss_unk = run_epoch(
        args,
        model,
        train_loader,
        optimizer,
        epoch,
Example #10
def get_modelica2GPU_configuration(config_file):
    """ Estrapola le informazioni dal file di configurazione """
    try:
        config_dict = yaml.load(open(config_file))
        # START LOG
        msg = f"Controllo della struttura del configuratore {config_file}"
        tmp_logger.debug(msg, msg)
        # END LOG
        yc = yamlchecker.YAMLChecker(config_file)
        msg, result = yc.check_correctness()
        if not result:
            print(msg)
            sys.exit(1)
        # START LOG
        msg = f"Controllo andato a buon fine"
        tmp_logger.debug(msg, msg)
        # END LOG
        # START LOG
        msg = f"Ottenimento delle informazioni necessarie da {config_file}"
        tmp_logger.debug(msg, msg)
        # END LOG
        # Once the configuration structure has been checked, extract the required information
        m2g_conf, builder_config = config_dict['modelica2gpu'], config_dict[
            'builder']
        genXML = m2g_conf['generateXML']
        xmlfile = createXML(
            m2g_conf['workingdir'], m2g_conf['modelfilename'],
            m2g_conf['omlibrary']) if genXML else m2g_conf['xml']
        notifier, filelogger = m2g_conf['notifier'], m2g_conf['filelogger']
        modelname = m2g_conf['modelfilename'][:-3]
        event_num, state_num = getnumevents(xmlfile), getnumstate(xmlfile)
        builder_options = [builder_config['MPGOSsourcedir']]
        device = None

        # Check that the eventDirection parameter is set correctly
        if builder_config['modeldefinition']['eventDirection'] is not None:
            # Check the eventDirection parameter
            check_multiple_config(
                "The eventDirection parameter is None although there are events, or vice versa",
                "The number of eventDirection parameters does not match the number of detected events",
                "All parameters must be of type int", builder_config,
                'eventDirection', event_num, int)

        # Check that the tolerance parameter is set correctly
        if builder_config['modeldefinition']['tolerance'] is not None:
            # Check the tolerance parameter
            check_multiple_config(
                "The tolerance parameter is None although there are states, or vice versa",
                "The number of tolerance parameters does not match the number of detected states",
                "All parameters must be of type float",
                builder_config, 'tolerance', state_num, float)

        builder_options += [
            builder_config['gpu']['major'], builder_config['gpu']['minor'],
            builder_config['modeldefinition']['numberOfThreads'],
            builder_config['modeldefinition']['numberOfProblems'],
            builder_config['modeldefinition']['numberOfDenseOutput'],
            builder_config['modeldefinition']['threadsPerBlock'],
            builder_config['modeldefinition']['initialTimeStep'],
            builder_config['modeldefinition']['preferSharedMemory'],
            builder_config['modeldefinition']['maximumTimeStep'],
            builder_config['modeldefinition']['minimumTimeStep'],
            list(builder_config['modeldefinition']['eventDirection'].values())
            if builder_config['modeldefinition']['eventDirection'] is not None
            else None,
            builder_config['modeldefinition']['denseOutputMinimumTimeStep'],
            builder_config['modeldefinition']['denseOutputSaveFrequency'],
            list(builder_config['modeldefinition']['tolerance'].values())
            if builder_config['modeldefinition']['tolerance'] is not None else
            None, builder_config['modeldefinition']['timeDomainInit'],
            builder_config['modeldefinition']['timeDomainEnd']
        ]
        if builder_config['usedefaultoptions']:
            # For every value that is null we insert the default, while for all the others
            # we keep the one provided. This way a user who needs to change a single
            # parameter does not have to re-enter all of them: only the desired value is
            # set explicitly and everything else falls back to the defaults.
            default_opts = getdefaultoptions(event_num, state_num, xmlfile)
            for idx, value in enumerate(builder_options):
                if value is None:
                    builder_options[idx] = default_opts[0][idx - 1]

        device = gpu_from_capability((builder_options[1], builder_options[2]))

        config_dict = {
            "xmlfile": xmlfile,
            "workingdir": m2g_conf['workingdir'],
            "modelname": modelname,
            "notifier": notifier,
            "filelogger": filelogger,
            "MPGOSsourcedir": builder_options[0],
            "GPU information": device,
            "numberOfThreads": builder_options[3],
            "numberOfProblems": builder_options[4],
            "numberOfDenseOutput": builder_options[5],
            "threadsPerBlock": builder_options[6],
            "initialTimeStep": builder_options[7],
            "preferSharedMemory": builder_options[8],
            "maximumTimeStep": builder_options[9],
            "minimumTimeStep": builder_options[10],
            "eventDirection": builder_options[11],
            "denseOutputMinimumTimeStep": builder_options[12],
            "denseOutputSaveFrequency": builder_options[13],
            "tolerance": builder_options[14],
            "timeDomainStart": builder_options[15],
            "timeDomainEnd": builder_options[16],
            "numberOfContinuousState": getnumstate(xmlfile)
        }

        conf_str = conf_dict2str(config_dict, device.get_attributes())
        print(conf_str)

        print()
        check = False
        while not check:
            ans = input(
                "Do you want to continue with these settings? [Y/N] ")
            if ans.upper() == "Y":
                check = True
            elif ans.upper() == "N":
                # START LOG
                msg = "Uscita dal programma in quanto conferma per continuare negativa"
                tmp_logger.info(msg, msg)
                # END LOG
                sys.exit(0)

        # START LOG
        # Create the file logger
        m2g_logger = None  # make sure the name exists even when file logging is disabled
        if config_dict['filelogger']:
            m2g_logger = logger.Logger(modelname, config_dict['workingdir'],
                                       config_log)
            msg = "Informazioni dal file di configurazione estrapolate. Riassunto della configurazione \n" + conf_str
            coloredlogs.install(level="DEBUG", logger=m2g_logger.clogger)
            m2g_logger.info(
                msg, "Configuration file information extracted")
        # END LOG

        # If the notifier field is True, append a system argument set to 1, otherwise 0.
        # This is done because enabling or disabling the notifier in each file depends on
        # a MACRO that reads the last input element (as if it were passed on the command line).
        if config_dict['notifier']:
            sys.argv.append(1)
        else:
            sys.argv.append(0)

        return config_dict, m2g_logger

    except AssertionError as ae:
        msg = f"\031[1;32;40modelica2GPU ha riscontrato il seguente errore. {ae.args[0]}"
        tmp_logger.error(msg, msg)
        sys.exit(1)
Example #11
def train_multitask(model, train_data, dev_data, config, test_data=None):
    log = logger.Logger(config.save_path)
    config.best_score = -100
    train_data_1, train_data_2 = train_data
    dev_data_1, dev_data_2 = dev_data
    test_data_1, test_data_2 = test_data
    # slm_num = 0
    # slm_pos = torch.tensor(0.0)
    # for data in train_data_2:
    #     slm_num += data[-1].shape[-1]
    #     slm_pos += torch.sum(data[-1]).type_as(torch.tensor(0.3))
    # neg_weight = slm_pos / slm_num
    # pos_weight = 1 - neg_weight
    # slm_loss_weight = torch.tensor([neg_weight, pos_weight]).cuda()

    slm_loss = nn.CrossEntropyLoss()  #slm_loss_weight)
    slot_loss_function = nn.CrossEntropyLoss(ignore_index=0)
    intent_loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(
        gamma=0.1,
        milestones=[config.epochs // 4, config.epochs // 2],
        optimizer=optimizer)
    slu_f1_scores = []  #early stop
    for epoch in range(config.epochs):
        model.train()
        losses_slu = []
        losses_slm = []
        losses_all = []
        scheduler.step()
        for i, (batch_1, batch_2) in enumerate(
                zip(data_loader(train_data_1, config.batch_size, True),
                    data_loader(train_data_2, config.batch_size, True))):
            h, c, slot, intent = pad_to_batch(batch_1, model.vocab,
                                              model.slot_vocab)
            h = [hh.to(device) for hh in h]
            c = c.to(device)
            slot = slot.to(device)
            intent = intent.to(device)

            slot_p, intent_p = model(h, c)
            loss_s = slot_loss_function(slot_p, slot.view(-1))
            loss_i = intent_loss_function(intent_p, intent.view(-1))
            loss_slu = loss_s + loss_i
            losses_slu.append(loss_slu.item())
            if config.slm_weight > 0:
                slm_h, slm_candi, slm_label = pad_to_batch_slm(
                    batch_2, model.vocab)
                slm_h = [hh.to(device) for hh in slm_h]
                slm_candi = [hh.to(device) for hh in slm_candi]
                slm_label = slm_label.to(device)
                slm_p = model(slm_h, slm_candi, slm=True).view(-1, 2)

                loss_slm = slm_loss(slm_p, slm_label.view(-1))
                losses_slm.append(loss_slm.item())
            else:
                loss_slm = 0
                losses_slm.append(loss_slm)

            optimizer.zero_grad()
            # if epoch >= config.epochs//4:
            #     config.slm_weight = config.slm_weight/2
            # elif epoch >= config.epochs//2:
            #config.slm_weight = config.slm_weight/(epoch+1)
            loss = loss_slm * config.slm_weight + (
                1 - config.slm_weight) * loss_slu
            losses_all.append(loss.item())

            loss.backward()
            optimizer.step()

            if i % 40 == 0:
                # SLU
                intent_acc = accuracy_score(
                    intent.view(-1).tolist(),
                    intent_p.max(1)[1].tolist())
                slot_f1 = f1_score(slot.view(-1).tolist(),
                                   slot_p.max(1)[1].tolist(),
                                   average='micro')
                # SLM
                if config.slm_weight > 0:
                    label = slm_label.view(-1).tolist()
                    pred = slm_p.max(1)[1].tolist()
                    slm_acc = accuracy_score(label, pred)
                    slm_recall = recall_score(label, pred)
                else:
                    slm_acc = 0
                    slm_recall = 0
                metrics_dict = {
                    'loss_all': np.round(np.mean(losses_all), 2),
                    'loss_slm': np.round(np.mean(losses_slm), 2),
                    'losses_slu': np.round(np.mean(losses_slu), 2),
                    # 'intent_acc': np.round(intent_acc,2),
                    # 'slot_f1': np.round(slot_f1,2),
                    # 'slm_acc': np.round(slm_acc,2),
                    # 'slm_recall': np.round(slm_recall,2)
                }
                log_printer(log,
                            "train",
                            epoch="{}/{}".format(epoch, config.epochs),
                            iters="{}/{}".format(
                                i,
                                len(train_data_1) // config.batch_size),
                            metrics=metrics_dict)
                losses_all = []
                losses_slu = []
                losses_slm = []

        metric, loss = evaluation_multi(model, dev_data_1, dev_data_2, config)

        metrics_dict = {
            'loss_all': np.round(np.mean(loss[0]), 2),
            'loss_slm': np.round(np.mean(loss[1]), 2),
            'losses_slu': np.round(np.mean(loss[2]), 2),
            'intent_acc': np.round(metric[0], 2),
            'slot_f1': np.round(metric[1], 2),
            'slm_acc': np.round(metric[2], 2),
            'slm_recall': np.round(metric[3], 2)
        }
        log_printer(log,
                    'eval',
                    epoch="{}/{}".format(epoch, config.epochs),
                    iters="{}/{}".format(
                        i,
                        len(train_data_1) // config.batch_size),
                    metrics=metrics_dict)
        early_metric = -loss[2]  #metric[1]
        if early_metric > config.best_score:
            slu_f1_scores = []
            config.best_score = early_metric
            evaluation(model, (test_data_1, test_data_2), config)
            save(model, config)
        slu_f1_scores.append(early_metric)
        if len(slu_f1_scores) > config.early_stop and config.early_stop != 0:
            print('Early stopping: the metric did not improve for {} epochs'.
                  format(config.early_stop))
            return
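A tiny worked illustration of the loss mixing used in train_multitask above (values are made up): the auxiliary slm loss and the SLU loss are blended as a convex combination controlled by config.slm_weight.

import torch
loss_slm, loss_slu, slm_weight = torch.tensor(0.8), torch.tensor(1.2), 0.3
loss = loss_slm * slm_weight + (1 - slm_weight) * loss_slu
print(loss.item())  # 0.3 * 0.8 + 0.7 * 1.2 = 1.08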
Example #12
argument_parser.add_argument("-l",
                             "--logger",
                             help="Path del file di configurazione del logger",
                             type=str,
                             required=True)
argument_parser.add_argument(
    "-c",
    "--config",
    help="Path del file di configurazione di modelica2GPU",
    type=str,
    required=True)
args = argument_parser.parse_args()
config_log = args.logger
config_m2g = args.config

tmp_logger = logger.Logger(None, ".", config_log, False)
coloredlogs.install(level="DEBUG", logger=tmp_logger.clogger)
start = time.time()


def createXML(workingdir, modelfilename, omlibrary):
    """ Crea l'XML tramite il comando del compilatore openmodelica e ritorna il nome del file """
    try:
        cwd = os.getcwd()
        # Move into the target directory where the XML will be saved
        os.chdir(workingdir)
        # Check that the installed Modelica library directory actually exists
        assert os.path.isdir(
            omlibrary
        ), "No Modelica versions are installed on the system"
        # Build the compilation command string
Example #13
    def init_logger(self):
        for i in range(self.num_node):
            self.loggers[i] = logger.Logger(self.logdir, i, self.use_logger)
            self.loggers[i].write_miners(self.fixed_miners)
Example #14
import pickle

import tensorflow as tf
from sklearn.model_selection import ParameterGrid
from sklearn.svm import SVC
from tqdm import tqdm

import utils.parameters as params
from utils import logger
from utils.data_processing import *
from utils.evaluate import *

FIXED_PARAMETERS = params.load_parameters()
modname = FIXED_PARAMETERS["model_name"]
logpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".log"
logger = logger.Logger(logpath)

model = FIXED_PARAMETERS["model_type"]

module = importlib.import_module(".".join(['models', model]))
MyModel = getattr(module, 'MyModel')

# Logging parameter settings at each launch of training script
# This will help ensure nothing goes awry in reloading a model and we consistently use the same hyperparameter settings.
logger.log("FIXED_PARAMETERS\n %s" % FIXED_PARAMETERS)

logger.log("Loading data")
training_mnli = load_nli_data(
    FIXED_PARAMETERS["training_mnli"],
    udpipe_path=FIXED_PARAMETERS['udpipe_path'],
    seq_length=FIXED_PARAMETERS['seq_length'],
Example #15
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import os
import commands
import utils.logger as log
import utils.infra_setup.heat.manager as client_manager

LOG = log.Logger(__name__).getLogger()

neutron_quota = {
    "subnet": -1,
    "network": -1,
    "floatingip": -1,
    "subnetpool": -1,
    "router": -1,
    "port": -1,
    "security_group": -1
}

nova_quota = {
    "ram": -1,
    "cores": -1,
    "instances": -1,
Example #16
    file.write(table)
    print("Table successfully written!")
    file.close()


if __name__ == '__main__':

    ### SETTING UP TENSORBOARD LOGGER ###
    result_directory = 'results/'
    log_directory = 'results/logs/'
    if not os.path.exists(result_directory):
        os.makedirs(result_directory)
    if not os.path.exists(log_directory):
        os.makedirs(log_directory)

    logger = logger.Logger('results/logs/')

    ### PARSING ARGUMENTS ###
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    ### PARAMETERS ###

    # LSTM & Q Learning
    IMAGE_SCALE = 28
Example #17
File: step1.py Project: sfssqs/Quant
#!/usr/bin/python

import sys
sys.path.append("..")

import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time

from enum import Enum
from utils import logger as log

RESISTANCE_FACTOR = 0.3
logger = log.Logger()


class FilterType(Enum):
    local = 1
    server = 2


def _normalize(data, type):
    if type == FilterType.local:
        filter_data = data.loc[:, ['open', 'high', 'close', 'low']]
        filter_data['price_change'] = filter_data.close - filter_data.open
        return filter_data
    elif type == FilterType.server:
        filter_data = data.loc[:, ['date', 'open', 'high', 'close', 'low']]
        filter_data['price_change'] = filter_data.close - filter_data.open
Example #18
def main():
    parser = argparse.ArgumentParser()

    # GSN settings
    parser.add_argument('--layers', type=int,
                        default=3)  # number of hidden layers
    parser.add_argument('--walkbacks', type=int,
                        default=5)  # number of walkbacks
    parser.add_argument('--hidden_size', type=int, default=1500)
    parser.add_argument('--hidden_act', type=str, default='tanh')
    parser.add_argument('--visible_act', type=str, default='sigmoid')

    # training
    parser.add_argument(
        '--cost_funct', type=str,
        default='binary_crossentropy')  # the cost function for training
    parser.add_argument('--n_epoch', type=int, default=500)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument(
        '--save_frequency', type=int,
        default=10)  #number of epochs between parameters being saved
    parser.add_argument('--early_stop_threshold', type=float, default=0.9996)
    parser.add_argument('--early_stop_length', type=int,
                        default=30)  #the patience number of epochs

    # noise
    parser.add_argument('--hidden_add_noise_sigma', type=float,
                        default=4)  #default=2
    parser.add_argument('--input_salt_and_pepper', type=float,
                        default=0.8)  #default=0.4

    # hyper parameters
    parser.add_argument('--learning_rate', type=float, default=0.25)
    parser.add_argument('--momentum', type=float, default=0.5)
    parser.add_argument('--annealing', type=float, default=0.995)
    parser.add_argument('--noise_annealing', type=float, default=0.98)

    # data
    parser.add_argument('--dataset', type=str, default='MNIST')
    parser.add_argument('--data_path', type=str, default='../data/')
    parser.add_argument('--classes', type=int, default=10)
    parser.add_argument('--output_path', type=str, default='../outputs/gsn/')

    # argparse does not deal with booleans
    parser.add_argument('--vis_init', type=int, default=0)
    parser.add_argument('--noiseless_h1', type=int, default=1)
    parser.add_argument('--input_sampling', type=int, default=1)
    parser.add_argument('--test_model', type=int, default=0)
    parser.add_argument('--continue_training', type=int, default=0)  #default=0

    args = parser.parse_args()

    ########################################
    # Initialization things with arguments #
    ########################################
    outdir = args.output_path + "/" + args.dataset + "/"
    data.mkdir_p(outdir)
    args.output_path = outdir

    # Create the logger
    logger = log.Logger(outdir)
    logger.log("---------CREATING GSN------------\n\n")
    logger.log(args)

    # See if we should load args from a previous config file (during testing)
    config_filename = outdir + 'config'
    if args.test_model and 'config' in os.listdir(outdir):
        config_vals = load_from_config(config_filename)
        for CV in config_vals:
            logger.log(CV)
            if CV.startswith('test'):
                logger.log('Do not override testing switch')
                continue
            try:
                exec('args.' + CV, globals(), locals())
            except Exception:
                exec('args.' + CV.split('=')[0] + "='" + CV.split('=')[1] + "'",
                     globals(), locals())
    else:
        # Save the current configuration
        # Useful for logs/experiments
        logger.log('Saving config')
        with open(config_filename, 'w') as f:
            f.write(str(args))

    ######################################
    # Load the data, train = train+valid #
    ######################################
    if args.dataset.lower() == 'mnist':
        (train_X,
         train_Y), (valid_X,
                    valid_Y), (test_X,
                               test_Y) = data.load_mnist(args.data_path)
        train_X = numpy.concatenate((train_X, valid_X))
        train_Y = numpy.concatenate((train_Y, valid_Y))
    else:
        raise AssertionError(
            "Dataset not recognized. Please try MNIST, or implement your own data processing method in data_tools.py"
        )

    # transfer the datasets into theano shared variables
    train_X, train_Y = data.shared_dataset((train_X, train_Y), borrow=True)
    valid_X, valid_Y = data.shared_dataset((valid_X, valid_Y), borrow=True)
    test_X, test_Y = data.shared_dataset((test_X, test_Y), borrow=True)

    ##########################
    # Initialize the new GSN #
    ##########################
    gsn = GSN(train_X, valid_X, test_X, vars(args), logger)

    # Load initial weights and biases from file if testing
    params_to_load = 'gsn_params.pkl'
    if args.test_model and os.path.isfile(params_to_load):
        logger.log("\nLoading existing GSN parameters")
        loaded_params = cPickle.load(open(params_to_load, 'rb'))
        [
            p.set_value(lp.get_value(borrow=False)) for lp, p in zip(
                loaded_params[:len(gsn.weights_list)], gsn.weights_list)
        ]
        [
            p.set_value(lp.get_value(borrow=False)) for lp, p in zip(
                loaded_params[len(gsn.weights_list):], gsn.bias_list)
        ]
    else:
        logger.log(
            "Could not find existing GSN parameter file {}, training instead.".
            format(params_to_load))
        args.test_model = False

    #########################################
    # Train or test the new GSN on the data #
    #########################################
    # Train if not test
    if not args.test_model:
        gsn.train()
    # Otherwise, test
    else:
        gsn.test()