Esempio n. 1
0
def compare():
	"""Run frozen-graph inference on both the original and the tailored AlexNet.

	Loads each model's YAML config and feeds the same single sample through
	``test_frozen_graph`` so the two outputs can be compared side by side.
	"""
	# Original (untouched) graph first.
	print("Inference Original AlexNet Graph:")
	test_frozen_graph(_config=load_config('config/alexnet.yaml'),
	                  _data=[sample])

	# Then the tailored variant.
	print("Inference Tailored AlexNet Graph:")
	test_frozen_graph(_config=load_config('config/alexnet_tailored.yaml'),
	                  _data=[sample])
Esempio n. 2
0
 def __init__(self):
     """Build the main window: icon, state flags, detector/listener threads, UI forms."""
     QMainWindow.__init__(self)
     # Window icon is decoded from an embedded base64 PNG.
     myicon = QIcon()
     myicon.addPixmap(base2Qpixmap(icon_png), QIcon.Normal, QIcon.Off)
     self.setWindowIcon(myicon)
     # State flags polled by other components via the lambdas below.
     self.PLAYING = False
     self.FLOATING = False
     self.detector = poe_detector(self)
     self.detector.playing_signal.connect(self.set_playing)
     self.SETTING = False
     self.is_setting = lambda: self.SETTING
     self.WORKING = False
     # NOTE(review): "MOVEFLOWTING" looks like a typo for "MOVEFLOATING";
     # kept as-is because the attribute may be referenced elsewhere.
     self.MOVEFLOWTING = False
     self.is_movingFloating = lambda: self.FLOATING and self.MOVEFLOWTING
     self.is_working = lambda: self.WORKING and self.detector.check_immediately(
     )
     self.is_editOK = lambda: not self.WORKING
     # Discover available .ini configs and load the last-used one.
     self.config_list = list_ini('./configs')
     self.config_name = load_ini(self.config_list)
     self.now_config = lambda: self.config_name
     # load_config returns (ok_flag, settings); the flag is kept in a local only.
     OK, self.setting = load_config(f"./configs/{self.config_name}.ini")
     # UI wiring: event handler, main form, floating overlay form.
     self.event = btn_events(self)
     self.ui = A_form(self, self.event)
     self.floating_window = B_form(self)
     # NOTE(review): "linstener" is a typo for "listener"; kept for compatibility.
     self.linstener = input_listener(self)
     # Start background threads: input listener, detector, and update check.
     self.linstener.start()
     self.detector.start()
     self.check_updata = check_version_thread()
     self.check_updata.update_signal.connect(self.need2update)
     self.check_updata.start()
Esempio n. 3
0
File: main.py Progetto: memoiry/NLU
def evaluate_line():
    """Interactive demo: classify intent and extract entities for typed sentences.

    Loads two intent-classification graphs (CNN + RNN) whose scores are
    ensembled by summation, plus a NER model, then loops forever reading a
    sentence from stdin and printing both predictions.
    """
    # Intent-classification resources.
    id_to_cat = get_id_to_cat('{}/categories.txt'.format(data_path))

    print(
        "==========================Loading the Intention Classification model....=========================="
    )
    model_1 = ImportGraph('{}/model_cnn'.format(model_path))
    model_2 = ImportGraph('{}/model_rnn'.format(model_path))
    print("Model loaded..")

    # NER resources.
    print(
        "==========================Loading the NER model....=========================="
    )
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # Limit GPU memory: grow the allocation on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)

        # Interactive recognition loop (Ctrl-C to quit).
        while True:
            # Read one sentence from the console.
            text = input("请输入要进行识别的句子:")

            # Intent: ensemble CNN and RNN scores by summing them, take argmax.
            id_text = process_text(text, '{}/vocab.txt'.format(data_path))
            pred_1 = model_1.run(id_text, 1.0)
            pred_2 = model_2.run(id_text, 1.0)
            pred = pred_1 + pred_2
            res = id_to_cat[int(np.argmax(pred))]
            print(res)

            # NER: tag the same sentence with the sequence model.
            result = model.evaluate_line(sess,
                                         input_from_line(text, char_to_id),
                                         id_to_tag)
            print(result)
def connect_to_twitter_api(wait_on_rate_limit=False):
    """Return an authenticated tweepy API client built from the config file.

    Args:
        wait_on_rate_limit: forwarded to tweepy; when True the client blocks
            until the rate-limit window resets instead of raising.
    """
    # OAuth credentials live under the 'twitter' section of the config.
    keys = utils.load_config()['default']['twitter']

    handler = tweepy.OAuthHandler(keys['consumer_key'],
                                  keys['consumer_secret'])
    handler.set_access_token(keys['access_token_key'],
                             keys['access_token_secret'])

    return tweepy.API(handler, wait_on_rate_limit=wait_on_rate_limit)
Esempio n. 5
0
    def __init__(self, args=None) -> None:
        """Initialize training hyper-parameters from a config file.

        Args:
            args: mapping that must contain a 'config' key pointing at the
                config file to load. Effectively required despite the None
                default (kept for interface compatibility).

        Raises:
            TypeError: if args is None, with a clear message instead of the
                opaque "'NoneType' object is not subscriptable".
        """
        super().__init__()
        if args is None:
            raise TypeError("args must be a mapping with a 'config' key")
        args = load_config(path=args['config'])
        self.batch_size = args['batch_size']
        self.epochs = args['epochs']
        self.learning_rate = args['learning_rate']
        self.optimizer = args['optimizer']
        self.save_freq = args['save_freq']

        # Build the network once all hyper-parameters are set.
        self.build_model()
def retrieve_users_hashtags():
    """
    Retrieve list of users and hashtags from "Hashtags and users" spreadsheet.

    The spreadsheet holds one tab for Users and another for Hashtags; it is
    updated manually and follows a predefined structure.
    """
    # The spreadsheet id comes from the config file.
    sheet_id = utils.load_config()['default']['drive']['spreadsheet_id']

    # Pull each tab into its own local file, Users first then Hashtags.
    for cell_range, destination in ((USERS_RANGE, PATH_ORIGINAL_USERS),
                                    (HASHTAGS_RANGE, PATH_ORIGINAL_HASHTAGS)):
        retrieve_from_spreadsheet(sheet_id, cell_range, destination)
Esempio n. 7
0
    def __init__(self, cfg_path, gpu_num, exp_name):
        """Set up semantic-discriminator training: config, device, output dir.

        Args:
            cfg_path: path to the experiment config (merged over the default YAML).
            gpu_num: CUDA device index to train on.
            exp_name: human-readable tag appended to the timestamped output dir.
        """
        self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
        self.device = torch.device("cuda:" + str(gpu_num))

        self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
        # Hard-coded training schedule / model constants.
        self.total_training_iters = 2
        self.num_batches_dis_train = 5
        self.num_batches_gen_train = 5
        self.mesh_num_vertices = 1498
        self.label_noise = 0
        self.semantic_dis_loss_num_render = 8

        # BUG FIX: the original referenced an undefined local `cfg` here
        # (NameError at runtime); the loaded config lives in self.cfg.
        self.training_output_dir = os.path.join(
            self.cfg['semantic_dis_training']['output_dir'],
            "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
        # exist_ok avoids the race between the exists() check and makedirs().
        os.makedirs(self.training_output_dir, exist_ok=True)
        self.tqdm_out = utils.TqdmPrintEvery()
Esempio n. 8
0
def main() -> None:
    """Bot entry point: configure logging, load config, start the Twitter bot."""
    utils.get_logger("bot")

    # Terminate early when the configuration cannot be loaded.
    if not utils.load_config():
        return

    # Verbose mode bumps both the logger and its first handler to DEBUG.
    if g.config["verbose"]:
        for target in (g.log, g.log.handlers[0]):
            target.setLevel(logging.DEBUG)

    try:
        g.exchanges = exchanges.get_exchanges()
        g.db = Database()
        image.init()
        Twitter(callback)
    except Exception as exc:
        # Top-level boundary: log the failure (with traceback) and exit.
        g.log.critical(f"{type(exc).__name__}: {exc}")
        g.log.critical(traceback.format_exc())
def evaluate_line():
    """Interactively evaluate NER + intent on sentences typed at the console.

    Loads config, vocab/tag/intent maps and a checkpointed TF model, then
    loops forever reading a sentence from stdin and printing the result.
    Per-sentence errors are logged and the loop continues.
    """
    config = load_config(args.config_file)
    logger = get_logger(args.log_file)
    # Limit GPU memory: grow the allocation on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # Mapping dicts were pickled as a single 6-tuple by the training script.
    with open(args.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
            f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        while True:
            try:
                # Prompt (Chinese): "Enter a test sentence:".
                line = input("请输入测试句子:")
                result = model.evaluate_line(sess,
                                             input_from_line(line, char_to_id),
                                             id_to_tag, id_to_intent)
                print(result)
            except Exception as e:
                # Best-effort loop: log and keep prompting.
                logger.info(e)
Esempio n. 10
0
def evaluate_test():
    """Evaluate the trained model on the held-out test set.

    Rebuilds the test pipeline (same preprocessing as training), restores the
    checkpointed model and runs the shared `evaluate` routine.
    """
    config = load_config(args.config_file)
    logger = get_logger(args.log_file)

    # Mapping dicts were pickled as a single 6-tuple by the training script.
    with open(args.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
            f)

    # Same preprocessing chain as in train(): load, re-tag, featurize.
    test_sentences = load_sentences(args.test_file, args.lower, args.zeros)
    update_tag_scheme(test_sentences, args.tag_schema)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                intent_to_id, args.lower)
    # Fixed eval batch size of 100, matching the train() evaluation managers.
    test_manager = BatchManager(test_data, 100)

    # Limit GPU memory: grow the allocation on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)

        evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Esempio n. 11
0
File: main.py Progetto: memoiry/NLU
def evaluate_line_ner():
    """Interactively run NER over sentences typed at the console.

    Loads config, vocab maps and a checkpointed TF model, then loops forever
    reading a sentence from stdin and printing the tagged result.

    NOTE(review): unlike the sibling `evaluate_line`, per-sentence exceptions
    are NOT caught here, so a bad input aborts the loop; kept as-is to
    preserve behavior.
    """
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # Limit GPU memory: grow the allocation on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        while True:
            # Prompt (Chinese): "Enter a test sentence:".
            line = input("请输入测试句子:")
            result = model.evaluate_line(sess,
                                         input_from_line(line, char_to_id),
                                         id_to_tag)
            print(result)
Esempio n. 12
0
"""

import os
import cv2
import numpy as np
import pandas as pd
from numpy import array as arr
from utils.calibration_utils import *
from triangulation.triangulate import *
from calibration.extrinsic import *
from utils.utils import load_config
import math
#%% Load the session config and the per-camera calibration data.
# Alternative session configs, kept commented for quick switching:
#config = load_config('config_20200804_FR_static.toml' )
#config = load_config('config_20200922_RT2D_static.toml' )
config = load_config('config_20200221_static.toml')
#config = load_config('config_20200804_FR.toml' )
path, videos, vid_indices = get_video_path(config)
intrinsics = load_intrinsics(path, vid_indices)
extrinsics = load_extrinsics(path)
#%% Triangulate 2D tracking into 3D.
from triangulation.triangulate import reconstruct_3d
recovery = reconstruct_3d(config)

#%% Save 3d recovery json file
import numpy as np
from json import JSONEncoder
import json


class NumpyArrayEncoder(JSONEncoder):
Esempio n. 13
0
def run_worker():
    """Start an RQ worker on the 'default' queue with a custom error handler."""
    conn = redis.from_url(os.environ['RQ_URL'])
    with Connection(conn):
        # Imported here so the app package is only loaded inside the worker.
        from app.service import rq_exception_handler
        rq_worker = Worker(
            ['default'],
            exception_handlers=rq_exception_handler,
            disable_default_exception_handler=True,
        )
        rq_worker.work()


# Script entry point: load config and logging first, then start the worker.
if __name__ == '__main__':
    from utils import logutil, utils
    import os

    utils.load_config(os.environ["ENV_CONFIG"])
    logutil.setup_logger()
    import redis
    from rq import Connection, Worker
    # Makes tasks start faster.
    # NOTE(review): imports are deliberately deferred until after config and
    # logging setup — presumably these modules read them at import time;
    # confirm before moving them to the top of the file.
    from processor.processor2 import processa

    run_worker()
Esempio n. 14
0
import argparse
from matplotlib import pyplot as plt
from src.config import Config 
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from model.RESNET import resnet18,resnet34,resnet50,resnet101,resnet152
from utils.utils import load_config,train_tf,test_tf

# Script entry point: evaluate a trained ResNet-34 on the test set.
if __name__ == '__main__':
    # NOTE(review): `mode` is not defined anywhere visible — this raises
    # NameError as written; confirm the intended value (e.g. a CLI flag).
    config = load_config(mode)
    # NOTE(review): `os` is used here but not imported in this file.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        # cudnn autotunes conv algorithms; helps when input sizes are fixed.
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    parser = argparse.ArgumentParser()
    parser.add_argument('-weights', type=str, required=True, help='the weights file you want to test')
    # NOTE(review): parser.parse_args() is never called, yet `args.weights`
    # is used below — `args` is undefined as written.
    net = resnet34().to(config.DEVICE)

    # NOTE(review): ImageFolder is not imported in this file (torchvision.datasets).
    test_set = ImageFolder(config.TEST_PATH,transform=test_tf)
    test_data=torch.utils.data.DataLoader(test_set, batch_size=config.BATCH_SIZE, shuffle=False)

    # NOTE(review): load_state_dict's second positional parameter is `strict`;
    # passing config.DEVICE here is almost certainly unintended — the device
    # belongs in torch.load(..., map_location=config.DEVICE).
    net.load_state_dict(torch.load(args.weights), config.DEVICE)
    print(net)
    net.eval()
Esempio n. 15
0
 def change_config(self, config_name):
     """Switch to another .ini config: persist the choice, reload settings, refresh UI."""
     self.config_name = config_name
     # Remember the selection so it is restored on next launch.
     save_ini(config_name)
     # load_config returns (ok_flag, settings); the flag is not checked here.
     OK, self.setting = load_config(f"./configs/{self.config_name}.ini")
     self.ui.load_setting()
    find_source_phone,
    valida_instancia,
    find_prompt_arn,
    get_agent_data,
    find_agent_arn,
    valida_telefono
)

from utils.utils import (
    load_config,
    get_params, 
    get_param
    )

print ("Description: >")
# Project-level parameters (tags etc.) come from a local JSON file.
config = load_config('project_params.json')

# Stack name selects which SSM parameter namespace to read from.
STACK_NAME = os.environ.get('STACK_NAME')


# Connect-instance parameters, fetched one by one from SSM under
# /ct-manager/<stack>/... (get_param is a project helper).
INSTANCE_ID = get_param(f"/ct-manager/{STACK_NAME}/instance-id")
INSTANCE_ALIAS = get_param(f"/ct-manager/{STACK_NAME}/instance-alias")
SOURCE_PHONE = get_param(f"/ct-manager/{STACK_NAME}/source-phone")
REGION = get_param(f"/ct-manager/{STACK_NAME}/region")
BEEP_PROMPT = get_param(f"/ct-manager/{STACK_NAME}/beep-prompt")
DEFAULT_AGENT =get_param(f"/ct-manager/{STACK_NAME}/default-agent")
PRESIGNED_URL_DURATION = get_param(f"/ct-manager/{STACK_NAME}/presigned-url-duration")


# Resource tags applied to everything this tool creates.
TAGS = config['tags']
Esempio n. 17
0
def train():
    """Train the joint NER + intent model end to end.

    Pipeline: load the three datasets, normalize tag scheme, build (or reload)
    the char/tag/intent vocabularies, featurize, then run the TF training loop
    with periodic dev evaluation and best-checkpoint saving.
    """
    # load data sets
    train_sentences = load_sentences(args.train_file, args.lower, args.zeros)
    dev_sentences = load_sentences(args.dev_file, args.lower, args.zeros)
    test_sentences = load_sentences(args.test_file, args.lower, args.zeros)

    # Use selected tagging scheme (IOB / IOBES); validates and rewrites the
    # tags of every split in place.
    update_tag_scheme(train_sentences, args.tag_schema)
    update_tag_scheme(test_sentences, args.tag_schema)
    update_tag_scheme(dev_sentences, args.tag_schema)

    # Create the char/tag/intent mapping dicts and persist them as a pickle,
    # unless a map file already exists from a previous run.
    if not os.path.isfile(args.map_file):
        # create dictionary for word
        if args.pre_emb:
            dico_chars_train = char_mapping(train_sentences, args.lower)[0]
            # Augment the char dictionary with pretrained-embedding vocab so
            # test-set characters unseen in training still get embeddings.
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), args.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      args.lower)

        # Create a dictionary and a mapping for tags (and intents).
        tag_to_id, id_to_tag, intent_to_id, id_to_intent = tag_mapping(
            train_sentences)

        with open(args.map_file, "wb") as f:
            pickle.dump([
                char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id,
                id_to_intent
            ], f)
    else:
        with open(args.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag, intent_to_id, id_to_intent = pickle.load(
                f)

    # prepare data, get a collection of list containing index features
    # for every sentence in each split.
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 intent_to_id, args.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               intent_to_id, args.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                intent_to_id, args.lower)

    # code.interact(local=locals())

    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    # Batch iterators: training uses the configured batch size, eval uses 100.
    train_manager = BatchManager(train_data, args.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    # make path for store log and model if not exist
    make_path(args)
    if os.path.isfile(args.config_file):
        config = load_config(args.config_file)
    else:
        config = config_model(char_to_id, tag_to_id, intent_to_id)
        save_config(config, args.config_file)
    # NOTE(review): make_path(args) is called twice (also above); the second
    # call looks redundant but is kept to preserve behavior.
    make_path(args)

    logger = get_logger(args.log_file)
    print_config(config, logger)

    # limit GPU memory: grow the allocation on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    # Number of optimizer steps that make up one full pass over the train set.
    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        # Core of the project: builds the model graph and restores/creates weights.
        model = create_model(sess, Model, args.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss_slot = []
        loss_intent = []

        # with tf.device("/gpu:0"):
        # Fixed 100-epoch training loop.
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss_slot, batch_loss_intent = model.run_step(
                    sess, True, batch)
                loss_slot.append(batch_loss_slot)
                loss_intent.append(batch_loss_intent)

                # Periodically log the mean losses and reset the accumulators.
                if step % args.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "INTENT loss:{:>9.6f}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss_intent),
                                    np.mean(loss_slot)))
                    loss_slot = []
                    loss_intent = []

            # After each epoch: evaluate on dev and checkpoint if it improved.
            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                # if i%7 == 0:
                save_model(sess, model, args.ckpt_path, logger)
        evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Esempio n. 18
0
from utils.calibration_utils import *
from triangulation.triangulate import *
from calibration.extrinsic import *

# Per-camera DeepLabCut tracking results (filtered CSVs) for one session.
cam_path = 'C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/videos/results_Qiwei_New_Iter3'
cam1_filename = '/Crackle_20201203_00001DLC_resnet50_TestDec3shuffle1_1030000filtered.csv'
cam2_filename = '/Crackle_20201203_00002DLC_resnet50_TestDec3shuffle1_1030000filtered.csv'
cam3_filename = '/Crackle_20201203_00003DLC_resnet50_TestDec3shuffle1_1030000filtered.csv'
cam4_filename = '/Crackle_20201203_00004DLC_resnet50_TestDec3shuffle1_1030000filtered.csv'

# Earlier config iterations, kept commented for reference:
#config_path = 'C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/config_Crackle_20201203_RT3D_static.toml'

#config_path = 'C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/Iteration_2_results/config_Crackle_20201203_RT3D_static_Iter2.toml'
#config_path = 'C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/Iteration_3_results/config_Crackle_20201203_RT3D_static_Iter3.toml'
config_path = 'C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/Iteration_3_results/config_Crackle_20201203_RT2D_static_Iter3.toml'
config = load_config(config_path)

# Where the triangulated 3D recovery will be written as JSON.
Recovery_3D_path = "C:/Users/dongq/DeepLabCut/Crackle-Qiwei-2020-12-03/Iteration_3_results/Crackle_20201203_RT2D.json"

# Prefix used for the reprojected output files (matches Recovery_3D_path).
reprojected_file_prefix = 'Crackle_20201203_RT2D'

#%% Triangulate the 2D tracking into 3D.
from triangulation.triangulate import reconstruct_3d
recovery = reconstruct_3d(config)

#%% Save 3d recovery json file
import numpy as np
from json import JSONEncoder
import json

Esempio n. 19
0
# update labeling scheme and bodyparts interested
# only need to update if using something other than base arm points
# NOTE(review): `parsed_toml` and `project_folder` are defined earlier in the
# file (outside this excerpt).
parsed_toml['labeling']['scheme'] = []
parsed_toml['labeling']['bodyparts_interested'] = [
    'back1', 'shoulder1', 'elbow1', 'elbow2', 'wrist1', 'wrist2', 'hand1',
    'hand2', 'hand3', 'pointX', 'pointY', 'pointZ', 'pinky1', 'pinky2', 'wand',
    'cup'
]

# Write the updated reconstruction config next to the project, then reload it
# through load_config so downstream code sees the normalized form.
recon_config_file = project_folder + r'\recon_config.toml'

with open(recon_config_file, 'w+') as file:
    toml.dump(parsed_toml, file)

config = load_config(recon_config_file)

#%% If you already ran calibration you don't need to run these.
calibrate_intrinsic(config)
calibrate_extrinsic(config)

#%% add static points if reference frame is provided
if (use_reference_frame):
    labels = ['pointX', 'pointY', 'pointZ']

    snapshots = vid_list

    # initialize static
    static = {label: [] for label in labels}

    # get labeled reference point for each camera and store in a new file, also copy over other tracking data
Esempio n. 20
0
def main(mode=None):
    """Train VGG16 on an image-folder dataset with warmup + multi-step LR decay.

    Loads config via `mode`, seeds all RNGs, builds the data loaders, trains
    for config.EPOCH-1 epochs logging to TensorBoard, and saves periodic
    checkpoints. The evaluation pass is currently commented out.
    """
    time_now = datetime.now().isoformat()
    config = load_config(mode)
    
    # Seed every RNG for reproducibility.
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)
    
    # Load train and test sets.
    train_set = ImageFolder(config.TRAIN_PATH, transform=train_tf) 
    length_train = len(train_set)
    train_data=torch.utils.data.DataLoader(train_set,batch_size=config.BATCH_SIZE,shuffle=True)
    iter_per_epoch = len(train_data)

    test_set = ImageFolder(config.TEST_PATH, transform=test_tf)
    length_test = len(test_set)
    test_data=torch.utils.data.DataLoader(test_set, batch_size=config.BATCH_SIZE, shuffle=True)
    
    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        # cudnn autotunes conv algorithms; helps when input sizes are fixed.
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    # choose network
    net = VGG16().to(config.DEVICE)
    print('The Model is VGG16\n')  
    
    # TensorBoard writer in a per-run (timestamped) directory.
    if not os.path.exists(config.LOG_DIR):
        os.mkdir(config.LOG_DIR)
    writer = SummaryWriter(log_dir=os.path.join(
            config.LOG_DIR, time_now))

    # optimizer and loss function
    optimizer = optim.SGD(net.parameters(),lr=config.LR, momentum=0.9,weight_decay=5e-4)
    loss_function = nn.CrossEntropyLoss()

    # LR schedule: linear warmup for config.WARM epochs, then multi-step decay.
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=config.MILESTONES,gamma=0.5)
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * config.WARM)
                 
    # create checkpoint folder to save model
    model_path = os.path.join(config.PATH,'model')
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    checkpoint_path = os.path.join(model_path,'{epoch}-{type}.pth')
                 
    # NOTE(review): best_acc and `a` are never used below (the eval block that
    # consumed best_acc is commented out).
    best_acc = 0.0
    a = config.EPOCH

    # NOTE(review): range(1, config.EPOCH) runs EPOCH-1 epochs — confirm
    # whether the final epoch is intentionally skipped.
    for epoch in range(1, config.EPOCH):

        if epoch > config.WARM:
            train_scheduler.step(epoch)
    
        ### train ###
        net.train()   
        train_loss = 0.0 # cost function error
        train_correct = 0.0

        for i, data in enumerate(train_data):

            # Warmup scheduler steps per batch during the warmup epochs.
            if epoch <= config.WARM:
                warmup_scheduler.step()

            length = len(train_data)
            image, label = data
            image, label = image.to(config.DEVICE),label.to(config.DEVICE)

            output = net(image)
            train_correct += get_acc(output, label)
            loss = loss_function(output, label)
            train_loss +=loss.item()

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # Log gradient norms of the last layer's weights/bias.
            last_layer = list(net.children())[-1]
            n_iter = (epoch-1) * iter_per_epoch +i +1
            for name, para in last_layer.named_parameters():
                if 'weight' in name:
                    writer.add_scalar('LastLayerGradients/grad_norm2_weights', para.grad.norm(), n_iter)
                if 'bias' in name:
                    writer.add_scalar('LastLayerGradients/grad_norm2_bias', para.grad.norm(), n_iter)
            
            print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tAcc: {:0.4f}\tLR: {:0.6f}'.format(
                train_loss/(i+1),
                train_correct/(i+1),
                optimizer.param_groups[0]['lr'],
                epoch=epoch,
                trained_samples=i * config.BATCH_SIZE + len(image),
                total_samples=length_train
            ))
            writer.add_scalar('Train/lr',optimizer.param_groups[0]['lr'] , n_iter)
            writer.add_scalar('Train/loss', (train_loss/(i+1)), n_iter)
            writer.add_scalar('Train/acc', (train_correct/(i+1)), n_iter)
        
        ## eval ### (currently disabled; only periodic checkpointing remains)
        if epoch%1==0:
#             net.eval()
#             test_loss = 0.0    
#             test_correct = 0.0

#             for i, data in enumerate(test_data):
#                 images, labels = data
#                 images, labels = images.to(config.DEVICE),labels.to(config.DEVICE)

#                 outputs = net(images)
#                 loss = loss_function(outputs, labels)
#                 test_loss += loss.item()
#                 test_correct += get_acc(outputs, labels)

#                 print('Testing: [{test_samples}/{total_samples}]\tAverage loss: {:.4f}, Accuracy: {:.4f}'.format(
#                 test_loss /(i+1),
#                 test_correct / (i+1),
#                 test_samples=i * config.BATCH_SIZE + len(images),
#                 total_samples=length_test))

#             writer.add_scalar('Test/Average loss', (test_loss/(i+1)), n_iter)
#             writer.add_scalar('Test/Accuracy', (test_correct/(i+1)), n_iter)
#             print()

            #start to save best performance model 
#             acc = test_correct/(i+1)  
#             if epoch > config.MILESTONES[1] and best_acc < acc:
#                 torch.save(net.state_dict(), checkpoint_path.format(epoch=epoch, type='best'))
#                 best_acc = acc
#                 continue

            # Periodic checkpoint every SAVE_EPOCH epochs.
            if not epoch % config.SAVE_EPOCH:
                torch.save(net.state_dict(), checkpoint_path.format(epoch=epoch, type='regular'))
    writer.close()
     render_template, abort, send_from_directory
from utils import utils
import models
import requests
from flask.json import jsonify
import urllib
from SEAPI import SEAPI
import datetime

# Prefer the stdlib json; fall back to simplejson on older interpreters.
try:
    import json
except ImportError:
    import simplejson as json

# Application setup: create the Flask app, load its config, then bind the DB.
app = Flask(__name__)
utils.load_config(app)

from models import db

db.init_app(app)

@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')

@app.route('/<path:resource>')
def serveStaticResource(resource):
    """Serve any other path as a static asset from the static/ directory."""
    return send_from_directory('static/', resource)

@app.route("/test")
def test():
Esempio n. 22
0
"""
Created on Thu Jul 25 14:31:01 2019

@author: minyoungpark
"""

from utils.utils import load_config
from calibration.intrinsic import calibrate_intrinsic
from calibration.extrinsic import calibrate_extrinsic
from triangulation.triangulate import reconstruct_3d
from utils.vis_utils import generate_three_dim_video
from utils.vis_utils import generate_three_dim_pictures

#%%
# Alternative session config, kept commented for quick switching:
#config = load_config('config_20200804_FR.toml' )
config = load_config('config_Test_20201123_15in.toml')

#%% If you already ran calibration you don't need to run these.
#%% Per-camera lens/intrinsic calibration.
calibrate_intrinsic(config)
#%% Camera-pose/extrinsic calibration.
calibrate_extrinsic(config)

#%% Inject hand-labeled static reference points into the tracking data.
from utils.triangulation_utils import add_static_points

import numpy as np

# Names of the static reference-frame points to add.
labels = ['pointX', 'pointY', 'pointZ']

snapshots = [
Esempio n. 23
0
            self._handle_event()
            pygame.display.update()
            self.clock.tick(self.fps)
        self.run()

    def end(self):
        """Game-over screen loop: redraw until the player is ready, then restart.

        NOTE(review): start/end/run call each other recursively rather than
        looping, so very long sessions grow the call stack — confirm whether
        this ever hits the recursion limit in practice.
        """
        while not self.ready:
            self.draw_elements()
            self._handle_event()
            pygame.display.update()
            self.clock.tick(self.fps)
        self._reset()
        self.start()

    def run(self):
        """Main gameplay loop: draw, handle input, advance unless paused.

        Exits when self.gg (game over) becomes true, then hands off to the
        end-screen loop. See the recursion note on end().
        """
        while not self.gg:
            self.draw_elements()
            self._handle_event()
            if not self.pause:
                self.action()
            pygame.display.update()
            self.clock.tick(self.fps)
        self.ready = False
        self.end()


# Script entry point: load settings and start the main game scene.
if __name__ == '__main__':
    args = load_config()
    mainScene = MainScene(args)
    mainScene.start()
import os, sys
import geopandas as gpd
import pandas as pd
from sqlalchemy import create_engine
from shapely.geometry import Point, Polygon

sys.path.append("..")
from utils.utils import load_config

# Module-level setup: read paths and the DB connection string from config.
config = load_config()
pano_dir = config['data']['pano_dir']
# Shared SQLAlchemy engine for this module.
ENGINE = create_engine(config['data']['DB'])


def create_polygon_by_bbox(bbox):
    """Build a rectangular Polygon from a bounding box.

    Args:
        bbox (list): [min_x, min_y, max_x, max_y]

    Returns:
        Polygon
    """
    min_x, min_y, max_x, max_y = bbox

    # Closed ring, counter-clockwise, starting and ending at (min_x, min_y).
    ring = [
        [min_x, min_y],
        [min_x, max_y],
        [max_x, max_y],
        [max_x, min_y],
        [min_x, min_y],
    ]

    return Polygon(ring)

import sys
from pathlib import Path
import os
# Make the project's src/ directory importable from this notebook location.
source_path = str(Path(os.path.abspath('ajv-first_data_understanding')).parent.parent / 'src')
if source_path not in sys.path:
    sys.path.insert(0, source_path)

import matplotlib.pyplot as plt
import pandas as pd
import re
import seaborn as sns

from utils import utils

# Resolve the dev-environment data directory from the project config.
data_path = utils.load_config('dev')['data_path']

# ## Reading the data

file = Path(data_path) / 'eviction0818crt41-42.csv.xlsx'

raw = pd.read_excel(file)

# Work on a copy so `raw` stays pristine.
df = raw.copy()

# Normalize column names: trimmed, lower-case, underscores for spaces.
df.columns = [c.strip().lower().replace(' ', '_') for c in df.columns]

# ## Some basic notions of the data

df.shape
Esempio n. 26
0
def main(mode=None):

    config = load_config(mode)

    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    train_set = ImageFolder(config.TRAIN_PATH, transform=train_tf)
    length1 = len(train_set)
    train_data = torch.utils.data.DataLoader(train_set,
                                             batch_size=config.BATCH_SIZE,
                                             shuffle=True)
    iter_per_epoch = len(train_data)

    test_set = ImageFolder(config.TEST_PATH, transform=test_tf)
    test_data = torch.utils.data.DataLoader(test_set,
                                            batch_size=config.BATCH_SIZE,
                                            shuffle=False)

    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    # choose network
    if config.MODEL == 1:
        net = VGG16().to(config.DEVICE)
        print('The Model is VGG\n')
    if config.MODEL == 2:
        net = resnet34().to(config.DEVICE)
        print('The Model is ResNet34\n')
    if config.MODEL == 3:
        net = mobilenet().to(config.DEVICE)
        print('The Model is mobilenet\n')
    if config.MODEL == 4:
        net = shufflenet().to(config.DEVICE)
        print('The Model is shufflenet\n')
#     print(dir(net))
#     # choose train or test
#     if config.MODE == 1:
#         print("Start Training...\n")
#         net.train()
#     if config.MODE == 2:
#         print("Start Testing...\n")
#         net.test()

    optimizer = optim.SGD(net.parameters(),
                          lr=config.LR,
                          momentum=0.9,
                          weight_decay=5e-4)
    loss_function = nn.CrossEntropyLoss()
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=config.MILESTONES, gamma=0.2)
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * config.WARM)
    #     optimizer = optim.Adam(net.parameters(),lr=float(config.LR),betas=(config.BETA1, config.BETA2))

    # use tensorboard
    runs_path = os.path.join(config.PATH, 'runs')
    if not os.path.exists(runs_path):
        os.mkdir(runs_path)

#     writer=SummaryWriter(log_dir=runs_path)
#     input_tensor = torch.Tensor(12, 3, 32, 32).cuda()
#     writer.add_graph(net, Variable(input_tensor, requires_grad=True))

#create checkpoint folder to save model
    model_path = os.path.join(config.PATH, 'model')
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    checkpoint_path = os.path.join(model_path, '{epoch}-{type}.pth')

    best_acc = 0.0
    for epoch in range(1, 100):
        if epoch > config.WARM:
            train_scheduler.step(epoch)

        ### train ###
        net.train()
        train_loss = 0.0  # cost function error
        train_correct = 0.0

        for i, data in enumerate(train_data):

            if epoch <= config.WARM:
                warmup_scheduler.step()

            length = len(train_data)
            image, label = data
            image, label = image.to(config.DEVICE), label.to(config.DEVICE)

            output = net(image)
            train_correct += get_acc(output, label)
            loss = loss_function(output, label)
            train_loss += loss.item()

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print(
                'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tAcc: {:0.4f}\tLR: {:0.6f}'
                .format(train_loss / (i + 1),
                        train_correct / (i + 1),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch,
                        trained_samples=i * 24 + len(image),
                        total_samples=len(train_data.dataset)))

    ##eval
        net.eval()
        test_loss = 0.0  # cost function error
        test_correct = 0.0

        for i, data in enumerate(test_data):
            images, labels = data
            images, labels = images.to(config.DEVICE), labels.to(config.DEVICE)

            outputs = net(images)
            loss = loss_function(outputs, labels)
            test_loss += loss.item()
            test_correct += get_acc(outputs, labels)

            print(
                'Test set: [{test_samples}/{total_samples}]\tAverage loss: {:.4f}, Accuracy: {:.4f}'
                .format(test_loss / (i + 1),
                        test_correct / (i + 1),
                        test_samples=i * 24 + len(images),
                        total_samples=len(test_data.dataset)))
        print()

        acc = test_correct / (i + 1)
        #start to save best performance model after learning rate decay to 0.01
        if epoch > config.MILESTONES[1] and best_acc < acc:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='best'))
            best_acc = acc
            continue

        if not epoch % config.SAVE_EPOCH:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='regular'))
# Make the project's src/ directory importable regardless of the working
# directory this script is launched from.
from pathlib import Path
import os
import sys
source_path = str(
    Path(os.path.abspath('ajv_dfa-first_draft')).parent.parent / 'src')
if source_path not in sys.path:
    sys.path.insert(0, source_path)

from utils import utils

import logging
import pandas as pd
import tweepy
import yaml

# NOTE(review): this config read runs (and can fail) at import time, and
# connect_to_twitter_api() reloads the keys itself — this global looks
# redundant unless other module code depends on it; confirm before removing.
twitter_keys = utils.load_config()['default']['twitter']


def connect_to_twitter_api(wait_on_rate_limit=False):
    """Build an authenticated tweepy API client.

    Credentials are read from the 'default'/'twitter' section of the
    project configuration on every call.

    Args:
        wait_on_rate_limit: forwarded to tweepy.API; when True the client
            waits out Twitter rate-limit windows instead of raising.

    Returns:
        A ready-to-use tweepy.API instance.
    """
    keys = utils.load_config()['default']['twitter']

    handler = tweepy.OAuthHandler(keys['consumer_key'],
                                  keys['consumer_secret'])
    handler.set_access_token(keys['access_token_key'],
                             keys['access_token_secret'])

    return tweepy.API(handler, wait_on_rate_limit=wait_on_rate_limit)

def main(mode=None):
    """Train a ResNet18 cat/dog classifier with TensorBoard logging.

    Every epoch the model is evaluated on the full test set and on
    cat-only / dog-only subsets, and checkpoints are saved ('best' after
    the second LR milestone, otherwise 'regular' every SAVE_EPOCH epochs).

    Args:
        mode: forwarded to load_config() to select the run configuration.
    """
    global name, logger

    # Tag_ResidualBlocks_BatchSize
    name = "my_log"
    logger = SummaryWriter("runs/" + name)

    cat_dir = "D:/Codewyf/AI/data/datasets/test/cat_test/"
    dog_dir = "D:/Codewyf/AI/data/datasets/test/dog_test/"

    config = load_config(mode)

    # Seed every RNG source so runs are reproducible.
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    train_set = ImageFolder(config.TRAIN_PATH, transform=train_tf)
    length_train = len(train_set)
    train_data = torch.utils.data.DataLoader(train_set,
                                             batch_size=config.BATCH_SIZE,
                                             shuffle=True)
    iter_per_epoch = len(train_data)  # batches per epoch (used by warmup)

    test_set = ImageFolder(config.TEST_PATH, transform=test_tf)
    length_test = len(test_set)
    test_data = torch.utils.data.DataLoader(test_set,
                                            batch_size=config.BATCH_SIZE,
                                            shuffle=True)

    # BUG FIX: both per-class loaders previously wrapped `test_set`, so the
    # "cat" and "dog" curves were just copies of the overall test metrics.
    cat_test_set = ImageFolder(cat_dir, transform=test_tf)
    cat_test_data = torch.utils.data.DataLoader(cat_test_set,
                                                batch_size=config.BATCH_SIZE,
                                                shuffle=True)

    dog_test_set = ImageFolder(dog_dir, transform=test_tf)
    dog_test_data = torch.utils.data.DataLoader(dog_test_set,
                                                batch_size=config.BATCH_SIZE,
                                                shuffle=True)

    # INIT GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        print('\nGPU IS AVAILABLE')
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")

    # choose network
    net = resnet18().to(config.DEVICE)
    print('The Model is ResNet18\n')

    # Stochastic gradient descent + cross-entropy loss.
    optimizer = optim.SGD(net.parameters(),
                          lr=config.LR,
                          momentum=0.9,
                          weight_decay=5e-4)
    loss_function = nn.CrossEntropyLoss()

    # LR is multiplied by gamma at each MILESTONES epoch; a linear warmup
    # scheduler covers the first config.WARM epochs.
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=config.MILESTONES, gamma=0.5)
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * config.WARM)

    # create checkpoint folder to save model
    model_path = os.path.join(config.PATH, 'model')
    if not os.path.exists(model_path):
        os.mkdir(model_path)
    checkpoint_path = os.path.join(model_path, '{epoch}-{type}.pth')

    best_acc = 0.0

    for epoch in range(1, config.EPOCH):

        if epoch > config.WARM:
            train_scheduler.step(epoch)

        ### train ###
        net.train()
        train_loss = 0.0  # running sum of batch losses
        train_correct = 0.0  # running sum of batch accuracies

        for i, data in enumerate(train_data):
            steps = len(train_data) * (epoch - 1) + i  # global step counter
            if epoch <= config.WARM:
                warmup_scheduler.step()

            image, label = data
            image, label = image.to(config.DEVICE), label.to(config.DEVICE)

            output = net(image)
            train_correct += get_acc(output, label)
            loss = loss_function(output, label)
            train_loss += loss.item()

            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Log and print running averages every other batch.
            if i % 2 == 0:
                train_loss_log = train_loss / (i + 1)
                train_correct_log = train_correct / (i + 1)
                logger.add_scalar('train_loss', train_loss_log, steps)
                logger.add_scalar('train_acc', train_correct_log, steps)
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tAcc: {:0.4f}\tLR: {:0.6f}'
                    .format(train_loss / (i + 1),
                            train_correct / (i + 1),
                            optimizer.param_groups[0]['lr'],
                            epoch=epoch,
                            trained_samples=i * config.BATCH_SIZE + len(image),
                            total_samples=length_train))

        ### eval: full test set ###
        net.eval()
        test_loss = 0.0  # cost function error
        test_correct = 0.0

        for i, data in enumerate(test_data):
            images, labels = data
            images, labels = images.to(config.DEVICE), labels.to(config.DEVICE)

            outputs = net(images)
            loss = loss_function(outputs, labels)
            test_loss += loss.item()
            test_correct += get_acc(outputs, labels)

            print(
                'Testing: [{test_samples}/{total_samples}]\tAverage loss: {:.4f}, Accuracy: {:.4f}'
                .format(test_loss / (i + 1),
                        test_correct / (i + 1),
                        test_samples=i * config.BATCH_SIZE + len(images),
                        total_samples=length_test))
        logger.add_scalar('test_loss', test_loss / (i + 1), epoch)
        logger.add_scalar('test_acc', test_correct / (i + 1), epoch)

        # BUG FIX: checkpoint accuracy was previously computed *before* the
        # eval loop ran (NameError on the first epoch, stale values after),
        # and the best-save branch `continue`d past evaluation entirely.
        # Save the best model after the LR has decayed past MILESTONES[1].
        acc = test_correct / (i + 1)
        if epoch > config.MILESTONES[1] and best_acc < acc:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='best'))
            best_acc = acc
        elif not epoch % config.SAVE_EPOCH:
            torch.save(net.state_dict(),
                       checkpoint_path.format(epoch=epoch, type='regular'))

        ### eval: cat-only subset ###
        net.eval()
        test_loss = 0.0
        test_correct = 0.0
        for i, data in enumerate(cat_test_data):
            images, labels = data
            images, labels = images.to(config.DEVICE), labels.to(config.DEVICE)
            outputs = net(images)
            loss = loss_function(outputs, labels)
            test_loss += loss.item()
            test_correct += get_acc(outputs, labels)
        logger.add_scalar('test_loss_cat', test_loss / (i + 1), epoch)
        logger.add_scalar('test_acc_cat', test_correct / (i + 1), epoch)

        ### eval: dog-only subset ###
        net.eval()
        test_loss = 0.0
        test_correct = 0.0
        for i, data in enumerate(dog_test_data):
            images, labels = data
            images, labels = images.to(config.DEVICE), labels.to(config.DEVICE)
            # BUG FIX: `ouputs = net(images)` typo meant the dog loss and
            # accuracy were computed from the previous loop's predictions.
            outputs = net(images)
            loss = loss_function(outputs, labels)
            test_loss += loss.item()
            test_correct += get_acc(outputs, labels)
        logger.add_scalar('test_loss_dog', test_loss / (i + 1), epoch)
        logger.add_scalar('test_acc_dog', test_correct / (i + 1), epoch)

        print()
Esempio n. 29
0
    parser.add_argument(
        '--recompute_poses',
        action='store_true',
        help='Recompute the poses, even if there is a precomputed pose cache.')
    parser.add_argument(
        '--recompute_meshes',
        action='store_true',
        help='Recompute the meshes, even for ones which already exist.')
    args = parser.parse_args()

    if args.batch_i > args.num_batches or args.batch_i <= 0:
        raise ValueError(
            "batch_i cannot be greater than num_batches nor less than 1")

    device = torch.device("cuda:" + str(args.gpu))
    cfg = utils.load_config(args.cfg_path, "configs/default.yaml")
    input_dir_img = cfg['dataset']['input_dir_img']
    input_dir_mesh = cfg['dataset']['input_dir_mesh']

    # making processed meshes output dir
    if input_dir_mesh[-1] == '/': input_dir_mesh = input_dir_mesh[:-1]
    output_dir_mesh = os.path.join(
        "data",
        input_dir_mesh.split('/')[-1] + "_" + args.name,
        "batch_{}_of_{}".format(args.batch_i, args.num_batches))
    if not os.path.exists(output_dir_mesh):
        os.makedirs(output_dir_mesh)

    # finding which instances are in this batch
    instance_names = []
    for mesh_path in glob.glob(os.path.join(input_dir_mesh, "*.obj")):
Esempio n. 30
0
File: main.py Progetto: memoiry/NLU
def train_ner(max_epoch=25):
    """Train the BiLSTM-CRF NER model end to end.

    Loads the train/dev/test sentence files, builds (or restores) the
    char/tag vocabularies, then trains for `max_epoch` passes, saving the
    model whenever dev-set evaluation improves.

    Args:
        max_epoch: number of passes over the training data (default 25,
            matching the previous hard-coded value).
    """
    clean(FLAGS)
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)
    update_tag_scheme(dev_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            # Extend the train vocabulary with pretrained-embedding words
            # that also occur in the test sentences.
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    # BUG FIX: the dev-set count was hard-coded as 0 in this summary.
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), len(dev_data), len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make path for store log and model if not exist
    # (previously called twice; once is enough)
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for _ in range(max_epoch):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            # Save whenever the dev F1 improves; always report test metrics.
            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
def train(cfg_path,
          gpu_num,
          experiment_name="semantic_discrim",
          light=False,
          label_noise=0):
    """Train the semantic discriminator to separate real from fake renders.

    Real images are labeled 0 and fake images 1 (optionally smoothed with
    uniform noise of width `label_noise`). Training/validation metrics are
    accumulated into pandas DataFrames and pickled to the output directory
    after every iteration; model weights are saved every epoch.

    Args:
        cfg_path: experiment YAML config, merged over configs/default.yaml.
        gpu_num: CUDA device index to train on.
        experiment_name: suffix for the timestamped output directory.
        light: use a small batch size / few workers (debug mode).
        label_noise: width of the uniform label-smoothing noise; 0 disables.

    Returns:
        dict with "train" and "val" DataFrames of the logged metrics.
    """
    device = torch.device("cuda:" + str(gpu_num))
    cfg = utils.load_config(cfg_path, "configs/default.yaml")
    if light:
        num_workers = 4
        batch_size = 8
    else:
        num_workers = 16
        batch_size = 128

    # setting up dataloader
    train_dataset = SemanticDiscriminatorDataset(cfg, "train")
    val_dataset = SemanticDiscriminatorDataset(cfg, "val")
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               num_workers=num_workers,
                                               shuffle=True,
                                               collate_fn=None,
                                               worker_init_fn=None)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             num_workers=num_workers,
                                             shuffle=False,
                                             collate_fn=None,
                                             worker_init_fn=None)

    # setting up network and optimizer
    semantic_discriminator_net = SemanticDiscriminatorNetwork(cfg)
    semantic_discriminator_net.to(device)
    optimizer = optim.Adam(semantic_discriminator_net.parameters(),
                           lr=0.00001,
                           weight_decay=1e-2)

    # for adding noise to training labels
    real_labels_dist = torch.distributions.Uniform(
        torch.tensor([0.0]), torch.tensor([0.0 + label_noise]))
    fake_labels_dist = torch.distributions.Uniform(
        torch.tensor([1.0 - label_noise]), torch.tensor([1.0]))

    # setting up training
    training_output_dir = os.path.join(
        cfg['semantic_dis_training']['output_dir'],
        "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), experiment_name))
    if not os.path.exists(training_output_dir):
        os.makedirs(training_output_dir)
    df_dict = {"train": pd.DataFrame(), "val": pd.DataFrame()}
    iteration_i = 0
    tqdm_out = utils.TqdmPrintEvery()

    # training
    for epoch_i in tqdm(range(cfg['semantic_dis_training']['epochs']),
                        file=tqdm_out):

        for batch in tqdm(train_loader, file=tqdm_out):
            semantic_discriminator_net.train()
            optimizer.zero_grad()
            batch_size = batch['real'].shape[0]
            real_imgs = batch['real'].to(device)
            fake_imgs = batch['fake'].to(device)
            # real images have label 0, fake images has label 1
            real_labels = real_labels_dist.sample(
                (batch_size, 1)).squeeze(2).to(device)
            fake_labels = fake_labels_dist.sample(
                (batch_size, 1)).squeeze(2).to(device)

            pred_logits_real = semantic_discriminator_net(real_imgs)
            pred_logits_fake = semantic_discriminator_net(fake_imgs)
            loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
                F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
            loss.backward()
            optimizer.step()

            curr_train_info = {
                "epoch": epoch_i,
                "iteration": iteration_i,
                "train_loss": loss.item()
            }
            # BUG FIX: DataFrame.append was deprecated and removed in
            # pandas 2.0; pd.concat is the supported equivalent.
            df_dict["train"] = pd.concat(
                [df_dict["train"], pd.DataFrame([curr_train_info])],
                ignore_index=True)
            iteration_i += 1
            # BUG FIX: previously the pickle file handle was never closed.
            with open(os.path.join(training_output_dir, "training_info.p"),
                      "wb") as f:
                pickle.dump(df_dict, f)

        # computing validation set accuracy
        # TODO: move this to its own independent method
        # BUG FIX: the progress message used to print even on epochs where
        # evaluation was skipped.
        if epoch_i % cfg['semantic_dis_training']['eval_every'] == 0:
            print("Computing Validation Set Accuracy...")
            val_accuracies = []
            for val_batch in tqdm(val_loader, file=tqdm_out):
                semantic_discriminator_net.eval()
                with torch.no_grad():
                    pred_logits_real = semantic_discriminator_net(
                        val_batch['real'].to(device))
                    pred_logits_fake = semantic_discriminator_net(
                        val_batch['fake'].to(device))
                    batch_size = val_batch['real'].shape[0]
                    real_labels = torch.zeros((batch_size, 1)).to(device)
                    fake_labels = torch.ones((batch_size, 1)).to(device)
                    # a prediction is correct when sigmoid(logit) falls on
                    # the right side of 0.5 for its label
                    real_correct_vec = (torch.sigmoid(pred_logits_real) >
                                        0.5) == real_labels.byte()
                    fake_correct_vec = (torch.sigmoid(pred_logits_fake) >
                                        0.5) == fake_labels.byte()
                    val_accuracies.append(real_correct_vec.cpu().numpy())
                    val_accuracies.append(fake_correct_vec.cpu().numpy())
            val_accuracy = np.mean(np.concatenate(val_accuracies, axis=0))
            print("Validation accuracy: {}".format(val_accuracy.item()))
            curr_val_info = {"epoch": epoch_i, "val_acc": val_accuracy.item()}
            df_dict["val"] = pd.concat(
                [df_dict["val"], pd.DataFrame([curr_val_info])],
                ignore_index=True)

        # saves model every epoch
        torch.save(
            semantic_discriminator_net.state_dict(),
            os.path.join(training_output_dir,
                         "weights_epoch_{}.pt".format(epoch_i)))

    return df_dict