예제 #1
0
 def connect_network(self, ssid, pw=None, timeout=15):
     """Join the WLAN `ssid` and, on success, persist the credentials and reboot.

     :param ssid: network name to join
     :param pw: optional passphrase (None for an open network)
     :param timeout: seconds to wait for an IP address before giving up
     """
     self.create_window()
     self.window.text(10, 10, 'Connecting to {}'.format(ssid), ugfx.BLACK)
     # Cycle the station interface so a stale association doesn't linger.
     if not self.sta_if.active():
         self.sta_if.active(True)
     else:
         self.sta_if.active(False)
         time.sleep(0.3)
         self.sta_if.active(True)
     self.create_status_box()
     self.sta_if.connect(ssid, pw)
     tried = 0
     # Poll every 200 ms, showing the intermediate status, until we either
     # obtain an IP or the timeout elapses.
     while self.get_status(self.sta_if) != 'GOT_IP' and tried < timeout:
         self.set_status(self.get_status(self.sta_if))
         time.sleep(0.2)
         tried += 0.2
     if self.get_status(self.sta_if) == 'GOT_IP':
         self.set_status('Connected!')
         # Save the credentials so the boot script can reconnect automatically.
         config = util.Config('network')
         config['sta_if'] = {
             'ssid': ssid,
             'password': pw,
         }
         config.save()
         time.sleep(1)
         self.set_status('Network configuration saved.')
         time.sleep(1)
         util.reboot()
     else:
         self.set_status('Connection failed.')
예제 #2
0
파일: reader.py 프로젝트: ssoi/mimicnotes
def main(_):
    '''Reader smoke test: iterate one epoch of the validation split and
    print the total number of words seen per epoch.

    Fix: previously an unrecognised ``config.data_storage`` value fell
    through both branches and left ``data`` unbound, crashing later with
    a confusing NameError; now it fails fast with a clear message.
    '''
    config = util.Config()
    if config.data_storage == 'shelve':
        data = NoteShelveData(config)
    elif config.data_storage == 'pickle':
        data = NotePickleData(config)
    else:
        raise ValueError('Unknown data_storage: {}'.format(config.data_storage))
    vocab = NoteVocab(config, data)
    reader = NoteICD9Reader(config, data, vocab)
    for epoch in xrange(1):
        words = 0
        print('Epoch', epoch)
        for batch in reader.get(['val']):
            for i in xrange(batch[0].shape[0]):
                note = batch[0][i]
                words += len(note)
        print(words)
def start(clientName, clientConfigPath):
    """Interactively capture screen positions for one client and persist them.

    Asks the user (prompts in Portuguese) to hover the mouse over the
    window-select spot, the food slot and the mana area; writes the three
    positions to ``clientConfigPath`` as JSON, reads the file back and
    prints it for confirmation.
    """
    def _capture(prompt, label):
        # Announce the step, give the user a second to move the mouse,
        # then record where it came to rest.
        print(clientName + " " + prompt)
        time.sleep(1)
        position = util.waitGetMouseStopped()
        print(clientName + " " + label + str(position))
        time.sleep(0.5)
        return position

    windowPos = _capture("Posicione o mouse onde seleciono a Janela...",
                         "POSICAO SELECIONAR JANELA: ")
    foodPos = _capture("Posicione o mouse onde DEVE COMER FOOD...",
                       "POSICAO DE ONDE COMER FOOD: ")
    manaPos = _capture("Posicione o mouse onde TEM MANA PRA FAZER A RUNA...",
                       "POSICAO E COR DE ONDE TEM MANA PRA FAZER A RUNA: ")

    configObject = util.Config(battlePos=None,
                               foodPos=foodPos,
                               manaPos=manaPos,
                               windowPos=windowPos)
    util.writeConfigJson(configObject, clientConfigPath)
    savedConfig = util.loadConfigFromJson(clientConfigPath)

    print("saved config: " + str(savedConfig))
예제 #4
0
def start():
    """Capture the battle, food and mana positions and save the make-rune config.

    Each position is taken from wherever the mouse stops after the prompt;
    the resulting config is written to ``util.MAKE_RUNE_CONFIG_PATH`` and
    read back for a confirmation printout.
    """
    def _capture(prompt, label):
        # Show the instruction, wait for the mouse to settle, echo the result.
        print(prompt)
        time.sleep(1)
        position = util.waitGetMouseStopped()
        print(label + str(position))
        time.sleep(0.5)
        return position

    battlePos = _capture("Posicione o mouse onde APARECERÁ PLAYER NO BATTLE...",
                         "POSICAO E COR DO BATTLE VAZIO: ")
    foodPos = _capture("Posicione o mouse onde DEVE COMER FOOD...",
                       "POSICAO DE ONDE COMER FOOD: ")
    manaPos = _capture("Posicione o mouse onde TEM MANA PRA FAZER A RUNA...",
                       "POSICAO E COR DE ONDE TEM MANA PRA FAZER A RUNA: ")

    configObject = util.Config(battlePos=battlePos,
                               foodPos=foodPos,
                               manaPos=manaPos,
                               windowPos=None)
    util.writeConfigJson(configObject, util.MAKE_RUNE_CONFIG_PATH)
    savedConfig = util.loadConfigFromJson(util.MAKE_RUNE_CONFIG_PATH)

    print("saved config: " + str(savedConfig))
예제 #5
0
 def __init__(self):
     """Set up a class-named logger, load the shared config and init state."""
     self.log = logging.getLogger(self.__class__.__name__)
     self._config = util.Config()
     # Item keys to collect; populated by _load_config().
     self._item_keys = []
     # Data period of 60 — presumably seconds; TODO confirm unit.
     self._period_data = 60
     # Connection is created lazily, not in the constructor.
     self._conn = None
     self._load_config()
예제 #6
0
def start():
    """Capture battle, food, mana and both ring-slot positions, then save.

    Unlike the other capture flows, this one does not echo each position;
    it just writes the config via ``util.writeConfigJson`` and prints the
    reloaded result.
    """
    def _capture(prompt):
        # Prompt, give the user a second, then take the resting position.
        print(prompt)
        time.sleep(1)
        position = util.waitGetMouseStopped()
        time.sleep(0.5)
        return position

    battlePos = _capture("Posicione o mouse onde APARECERÁ o player na battle...")
    foodPos = _capture("Posicione o mouse no slot de comer food...")
    manaPos = _capture("Posicione o mouse onde tem mana (AZUL) para fazer a runa...")
    inventoryRingSlot = _capture("Posicione o mouse no slot de ring do inventário...")
    backpackRingSlot = _capture("Posicione o mouse no primeiro slot de life ring da sua bp...")

    configObject = util.Config(battlePos=battlePos,
                               foodPos=foodPos,
                               manaPos=manaPos,
                               inventoryRingSlot=inventoryRingSlot,
                               backpackRingSlot=backpackRingSlot)
    util.writeConfigJson(configObject)
    savedConfig = util.loadConfigFromJson()

    print("saved config: " + str(savedConfig))
예제 #7
0
    def load_config(self):
        """Build the Config backed by the four JSON files under data_dir,
        log whether debug mode is enabled, and return it."""
        names = ('friend_codes', 'preferences', 'version_info', 'statuses')
        sources = {name: data_dir / (name + '.json') for name in names}
        config = util.Config(**sources)

        debug_state = 'ON' if config.preferences['debug'] else 'OFF'
        logging.info('Debug mode: ' + debug_state)

        return config
예제 #8
0
 def __init__(self, cbis_pod_name, from_date, to_date):
     """Set up logger/config and remember the pod and date range to query.

     :param cbis_pod_name: name of the CBIS pod this instance targets
     :param from_date: start of the query window — format not shown here; TODO confirm
     :param to_date: end of the query window — format not shown here; TODO confirm
     """
     self.log = logging.getLogger(self.__class__.__name__)
     self._config = util.Config()
     # Item keys to collect; populated by _load_config().
     self._item_keys = []
     # Data period of 60 — presumably seconds; TODO confirm unit.
     self._period_data = 60
     # Connection is created lazily, not in the constructor.
     self._conn = None
     self._load_config()
     self._cbis_pod_name = cbis_pod_name
     self._from_date = from_date
     self._to_date = to_date
예제 #9
0
 def test_false_if_invalid_input(self):
     """Every document in BAD_SAMPLE_YAML must be rejected by yaml_is_valid()."""
     callback = MockCallback()
     cfg = util.Config(callback)
     for bad_yaml in BAD_SAMPLE_YAML:
         # Bind the current document as a default argument so the stub
         # returns this iteration's value when yaml_is_valid() calls it.
         cfg.get_yaml = lambda doc=bad_yaml: doc
         self.assertFalse(
             cfg.yaml_is_valid(),
             "found the following yaml valid\n{}".format(bad_yaml))
예제 #10
0
파일: run.py 프로젝트: ssoi/mimicnotes
def main(_):
    """Instantiate the runner class named in the config and run it.

    TensorFlow-based runners get a fresh graph and a Session with GPU
    memory growth enabled; any other runner is constructed and run directly.
    """
    config = util.Config()
    # Resolve the runner class by name from the local `model` package.
    RunnerClass = getattr(importlib.import_module("model"), config.runner)
    if issubclass(RunnerClass, util.TFRunner):
        config_proto = tf.ConfigProto()
        # Allocate GPU memory on demand instead of grabbing it all up front.
        config_proto.gpu_options.allow_growth = True
        with tf.Graph().as_default(), tf.Session(
                config=config_proto) as session:
            RunnerClass(config, session).run()
    else:
        RunnerClass(config).run()
예제 #11
0
 def test_config_no_exceptions_getting_yaml(self):
     """Neither Config() construction nor Config.get_yaml() may raise.

     Fix: the flags are now initialised up front. Previously, if
     ``Config.__init__`` raised, ``yml_raised`` was never bound and the
     final assert crashed with a NameError instead of reporting the real
     failure. Bare ``except:`` clauses (which also swallow SystemExit /
     KeyboardInterrupt) are narrowed to ``except Exception``.
     """
     init_raised = False
     yml_raised = False
     try:
         ce = MockCallback()
         cfg = util.Config(ce)
         try:
             cfg.get_yaml()
         except Exception:
             yml_raised = True
     except Exception:
         init_raised = True
     self.assertFalse(init_raised, "Config.__init__ raised an exception")
     self.assertFalse(yml_raised, "Config.get_yml raised an exception")
예제 #12
0
파일: prepare.py 프로젝트: ssoi/mimicnotes
def main(_):
    """Prepare note data, vocabulary and reader; optionally print stats.

    Fix: previously an unrecognised ``config.data_storage`` value fell
    through both branches and left ``data`` unbound, crashing later with
    a confusing NameError; now it fails fast with a clear message.
    """
    config = util.Config()
    if config.data_storage == 'shelve':
        data = util.NoteShelveData(config)
    elif config.data_storage == 'pickle':
        data = util.NotePickleData(config)
    else:
        raise ValueError('Unknown data_storage: {}'.format(config.data_storage))
    vocab = util.NoteVocab(config, data)
    if config.visualize:
        print('Stats:')
        data.print_stats(vocab)
    # Constructing the reader validates the data/vocab wiring even though
    # it is not iterated here.
    reader = util.NoteICD9Reader(config, data, vocab)
    print('All done!')
예제 #13
0
def full_check(thread_manager):
    """
    Checks for missing or corrupt files and directories, and restores them where necessary.
    :param thread_manager: owner of download threads; missing downloadable
        files are fetched asynchronously via util.GithubDownloadThread
    :return: bool - Whether any changes have been made
    """
    modified = False

    # create directories if they don't exist yet
    data_dir.mkdir(exist_ok=True)
    (data_dir / 'cache').mkdir(exist_ok=True)
    (script_dir / 'logs').mkdir(exist_ok=True)
    (script_dir / 'logs' / 'errors').mkdir(exist_ok=True)

    # Temporary config, used only to read version_info for download URLs.
    temp_config = util.Config(friend_codes=data_dir / 'friend_codes.json',
                              preferences=data_dir / 'preferences.json',
                              version_info=data_dir / 'version_info.json',
                              statuses=data_dir / 'statuses.json')

    for file, operation in file_operations.items():
        path = data_dir / file
        if path.exists():
            # File already present — nothing to restore.
            continue

        modified = True
        if operation == 'create':
            create_json(path)
        elif operation == 'download':
            # defaults
            username = '******'
            repo = 'wiimmfi-rpc'
            branch = 'master'

            # Prefer the git coordinates recorded in version_info, if complete.
            if temp_config.version_info.complete:
                username = temp_config.version_info['git']['username']
                repo = temp_config.version_info['git']['repo']
                branch = temp_config.version_info['git']['branch']

            url = download_base_url.format(username=username,
                                           repo=repo,
                                           branch=branch,
                                           file=file)

            # Fetch asynchronously; the thread manager owns the download.
            download_thread = util.GithubDownloadThread('GET', url)
            thread_manager.add_thread(download_thread)

    return modified
예제 #14
0

# Extract the single integer embedded in a stat string such as
# '\n\t\t\t\t\t10ms\t\t\t\t\t' (returns int, or None if absent).
def extractTime(statString):
    """Return the number preceding the two-character unit suffix, or None.

    The string is whitespace-stripped, the trailing two characters
    (e.g. 'ms') are dropped, and the remainder is parsed if it is all digits.
    """
    trimmed = statString.strip()
    number_part = trimmed[:-2]  # drop the trailing unit, e.g. 'ms'
    return int(number_part) if number_part.isdigit() else None


# Start of the MAIN part
if __name__ == "__main__":
    config = util.Config(CONFIG_PATH)  # create the config object
    userIds = config.getUserIds()  # read the user ids from the config object
    token = config.getGitToken()  # token used to connect to the git api
    userResultDicts = [dict() for i in range(len(userIds))
                       ]  # one dict per user, holding per-problem solve times
    # connect to the git api using the token
    gitCli = git.GitConnector(token)
    # download the file contents from the target repo
    gitContentStr = gitCli.getDecodedContents(REPO_NAME, FILE_NAME)

    # fill the result dictionary for each user
    # NOTE(review): the loop body appears truncated in this excerpt —
    # tmpDict/pageNumber are built but never used here; confirm against
    # the full file.
    for idx, user in enumerate(userIds):
        tmpDict = dict()
        bsObject = getBSObject(user, 1)
        pageNumber = int(countPage(bsObject))
예제 #15
0
def train_net(model, args):
	"""Train the six-stage keypoint model on the FashionAI training split.

	Logs running loss averages every ``config.display`` iterations and
	saves a checkpoint every 5000 iterations until ``config.max_iter``.

	NOTE(review): uses legacy PyTorch idioms (``async=True``, ``Variable``,
	``loss.data[0]``) — this targets pre-0.4 PyTorch and pre-3.7 Python.
	"""
	ann_path = '../FashionAI/data/train/Annotations/trainminusval.csv'
	img_dir = '../FashionAI/data/train/'

	stride = 8
	cudnn.benchmark = True
	config = util.Config('./config.yml')

	train_loader = torch.utils.data.DataLoader(
		dataset_loader.dataset_loader(img_dir, ann_path, stride,
		                              transforms.ToTensor()),
		batch_size=config.batch_size, shuffle=True,
		num_workers=config.workers, pin_memory=True)

	criterion = nn.MSELoss().cuda()
	params, multiple = get_parameters(model, config, False)

	optimizer = torch.optim.SGD(params, config.base_lr, momentum=config.momentum,
	                            weight_decay=config.weight_decay)
	model.train()
	iters = 0
	batch_time = util.AverageMeter()
	data_time = util.AverageMeter()
	losses = util.AverageMeter()
	losses_list = [util.AverageMeter() for i in range(12)]
	end = time.time()

	heat_weight = 48 * 48 * 25 / 2.0  # for convenient to compare with origin code
	# heat_weight = 1

	while iters < config.max_iter:
		for i, (input, heatmap) in enumerate(train_loader):
			learning_rate = util.adjust_learning_rate(optimizer, iters, config.base_lr, policy=config.lr_policy,\
								policy_parameter=config.policy_parameter, multiple=multiple)
			data_time.update(time.time() - end)

			input = input.cuda(async=True)
			heatmap = heatmap.cuda(async=True)
			input_var = torch.autograd.Variable(input)
			heatmap_var = torch.autograd.Variable(heatmap)

			# One heatmap (and one loss term) per intermediate supervision stage.
			heat1, heat2, heat3, heat4, heat5, heat6 = model(input_var)
			loss1 = criterion(heat1,heatmap_var) * heat_weight
			loss2 = criterion(heat2, heatmap_var) * heat_weight
			loss3 = criterion(heat3, heatmap_var) * heat_weight
			loss4 = criterion(heat4, heatmap_var) * heat_weight
			loss5 = criterion(heat5, heatmap_var) * heat_weight
			loss6 = criterion(heat6, heatmap_var) * heat_weight
			loss = loss1 + loss2 + loss3 + loss4 + loss5 + loss6
			losses.update(loss.data[0], input.size(0))
			loss_list = [loss1 , loss2 , loss3 , loss4 , loss5 , loss6]
			for cnt, l in enumerate(loss_list):
				losses_list[cnt].update(l.data[0], input.size(0))

			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
			batch_time.update(time.time() - end)
			end = time.time()


			iters += 1
			if iters % config.display == 0:
				print('Train Iteration: {0}\t'
				      'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
				      'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
				      'Learning rate = {2}\n'
				      'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
					iters, config.display, learning_rate, batch_time=batch_time,
					data_time=data_time, loss=losses))
				for cnt in range(0, 6):
					print('Loss{0}_1 = {loss1.val:.8f} (ave = {loss1.avg:.8f})'.format(cnt + 1,loss1=losses_list[cnt]))
				print(time.strftime(
					'%Y-%m-%d %H:%M:%S -----------------------------------------------------------------------------------------------------------------\n',
					time.localtime()))

				# Reset the running meters after each display window.
				batch_time.reset()
				data_time.reset()
				losses.reset()
				for cnt in range(12):
					losses_list[cnt].reset()

			# Periodic checkpoint, named after the iteration count.
			if iters % 5000 == 0:
				torch.save({
					'iter': iters,
					'state_dict': model.state_dict(),
				},  str(iters) + '.pth.tar')

			if iters == config.max_iter:
				break
	return
예제 #16
0
def train_net(model, args):
    """Train the single-output keypoint model on the FashionAI training split.

    Logs running loss averages every ``config.display`` iterations and
    saves a checkpoint every 5000 iterations until ``config.max_iter``.

    NOTE(review): uses legacy PyTorch idioms (``async=True``, ``Variable``,
    ``loss.data[0]``) — this targets pre-0.4 PyTorch and pre-3.7 Python.
    Also note the model is kept in eval() mode during training (comment
    below says this is deliberate, to freeze bn/dropout).
    """

    ann_path = '../FashionAI/data/train/Annotations/trainminusval.csv'
    img_dir = '../FashionAI/data/train/'

    stride = 8
    cudnn.benchmark = True
    config = util.Config('./config.yml')
    train_loader = torch.utils.data.DataLoader(dataset_loader.dataset_loader(
        img_dir,
        ann_path,
        stride,
        Mytransforms.Compose([
            Mytransforms.RandomResized(),
            Mytransforms.RandomRotate(40),
            Mytransforms.RandomCrop(384),
        ]),
        sigma=15),
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.workers,
                                               pin_memory=True)

    criterion = nn.MSELoss().cuda()
    # Collect only trainable parameters, all at the base learning rate.
    params = []
    for key, value in model.named_parameters():
        if value.requires_grad != False:
            params.append({'params': value, 'lr': config.base_lr})

    optimizer = torch.optim.SGD(params,
                                config.base_lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)
    # model.train() # only for bn and dropout
    model.eval()

    from matplotlib import pyplot as plt

    iters = 0
    batch_time = util.AverageMeter()
    data_time = util.AverageMeter()
    losses = util.AverageMeter()
    losses_list = [util.AverageMeter() for i in range(12)]
    end = time.time()

    heat_weight = 48 * 48 * 25 / 2.0  # for convenient to compare with origin code
    # heat_weight = 1

    while iters < config.max_iter:
        for i, (input, heatmap) in enumerate(train_loader):
            learning_rate = util.adjust_learning_rate(optimizer, iters, config.base_lr, policy=config.lr_policy,\
                 policy_parameter=config.policy_parameter)
            data_time.update(time.time() - end)

            input = input.cuda(async=True)
            heatmap = heatmap.cuda(async=True)
            input_var = torch.autograd.Variable(input)
            heatmap_var = torch.autograd.Variable(heatmap)

            heat = model(input_var)

            # feat = C4.cpu().data.numpy()
            # for n in range(100):
            # 	plt.subplot(10, 10, n + 1);
            # 	plt.imshow(feat[0, n, :, :], cmap='gray')
            # 	plt.xticks([]);
            # 	plt.yticks([])
            # plt.show()

            loss1 = criterion(heat, heatmap_var) * heat_weight
            # loss2 = criterion(heat4, heatmap_var) * heat_weight
            # loss3 = criterion(heat5, heatmap_var) * heat_weight
            # loss4 = criterion(heat6, heatmap_var) * heat_weight
            # loss5 = criterion(heat, heatmap_var)
            # loss6 = criterion(heat, heatmap_var)

            loss = loss1  # + loss2 + loss3# + loss4# + loss5 + loss6
            losses.update(loss.data[0], input.size(0))
            loss_list = [loss1]  #, loss2, loss3]# , loss4 ]# , loss5 , loss6]
            for cnt, l in enumerate(loss_list):
                losses_list[cnt].update(l.data[0], input.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch_time.update(time.time() - end)
            end = time.time()

            iters += 1
            if iters % config.display == 0:
                print(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        iters,
                        config.display,
                        learning_rate,
                        batch_time=batch_time,
                        data_time=data_time,
                        loss=losses))
                for cnt in range(0, 1):
                    print(
                        'Loss{0}_1 = {loss1.val:.8f} (ave = {loss1.avg:.8f})'.
                        format(cnt + 1, loss1=losses_list[cnt]))
                print(
                    time.strftime(
                        '%Y-%m-%d %H:%M:%S -----------------------------------------------------------------------------------------------------------------\n',
                        time.localtime()))

                # Reset the running meters after each display window.
                batch_time.reset()
                data_time.reset()
                losses.reset()
                for cnt in range(12):
                    losses_list[cnt].reset()

            # Periodic checkpoint, named after the iteration count.
            if iters % 5000 == 0:
                torch.save({
                    'iter': iters,
                    'state_dict': model.state_dict(),
                },
                           str(iters) + '.pth.tar')

            if iters == config.max_iter:
                break
    return
예제 #17
0
import hashlib
import os
import subprocess
import sys
import time

from gevent.pywsgi import WSGIServer
from geventwebsocket import WebSocketError
from geventwebsocket.websocket import Header

import util

# bottle.debug(True)

app = bottle.Bottle()
config = util.Config()

@app.get('/upload')
def upload():
  """Render the upload page with the configured title and text."""
  page_title = config.title()
  page_text = config.text()
  return bottle.template('upload', title=page_title, text=page_text)

@app.route('/static/<filepath:path>')
def serve_static(filepath):
  """Serve a static asset from the local ./static directory."""
  static_root = './static'
  return bottle.static_file(filepath, root=static_root)

@app.route('/websocket')
def handle_websocket():
  """Handle a websocket upgrade request.

  NOTE(review): this excerpt looks truncated — ``start_time`` is never
  used below, and ``abort`` is not defined in the visible imports
  (presumably ``bottle.abort``); confirm against the full file.
  """
  start_time = time.time()
  wsock = bottle.request.environ.get('wsgi.websocket')
  if not wsock:
    abort(400, 'Expected Websocket request.')
def train_net():
    """Fine-tune a per-garment keypoint model on the FashionAI data.

    For each garment class (only index 0, 'blouse', in the current range)
    this loads a previous checkpoint, builds a data loader with augmentation,
    trains with Adam, logs every ``config.display`` iterations and saves a
    checkpoint (plus a loss log line) every 1000 iterations.

    NOTE(review): uses legacy PyTorch idioms (``async=True``, ``Variable``,
    ``loss.data[0]``); also ``criterion`` is only assigned when CUDA is
    available, so a CPU-only run would hit a NameError — confirm intent.
    """
    annList = [
        '../data/train/Annotations/blouse.csv',
        '../data/train/Annotations/dress.csv',
        '../data/train/Annotations/outwear.csv',
        '../data/train/Annotations/skirt.csv',
        '../data/train/Annotations/trousers.csv'
    ]
    classNumList = [13, 15, 14, 4, 7]
    index_array = [[2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16],
                   [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20],
                   [2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
                   [17, 18, 19, 20], [17, 18, 21, 22, 23, 24, 25]]

    paramsNameList = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']
    modelSaveList = [
        '../saveparameter/blouse/', '../saveparameter/dress/',
        '../saveparameter/outwear/', '../saveparameter/skirt/',
        '../saveparameter/trousers/'
    ]
    paramsOldList = [
        '../saveparameter/blouse/3000res50.pth.tar',
        '../saveparameter/dress/15000new2.pth.tar',
        '../saveparameter/outwear/10000new2.pth.tar',
        '../saveparameter/skirt/5000new2.pth.tar',
        '/home/tanghm/Documents/YFF/project/saveparameter/trousers/15000new2.pth.tar'
    ]
    for idx in range(0, 1):
        # print which garment class is currently being trained
        print('train' + paramsNameList[idx])
        # total number of keypoints this garment class predicts
        numpoints = classNumList[idx]
        # build the model and restore the previous checkpoint
        model = construct_model(numpoints)
        state_dict = torch.load(paramsOldList[idx])['state_dict']
        model.load_state_dict(state_dict)
        # path of the label file
        ann_path = annList[idx]
        # directory holding the images
        img_dir = '../data/train/'

        stride = 8
        cudnn.benchmark = True
        config = util.Config('./config.yml')
        # build the training data loader
        train_loader = torch.utils.data.DataLoader(
            dataset_loader.dataset_loader(numpoints,
                                          img_dir,
                                          ann_path,
                                          stride,
                                          Mytransforms.Compose([
                                              Mytransforms.RandomResized(),
                                              Mytransforms.RandomRotate(40),
                                              Mytransforms.RandomCrop(384),
                                          ]),
                                          sigma=15),
            batch_size=config.batch_size,
            shuffle=True,
            num_workers=config.workers,
            pin_memory=True)
        # loss function for the network
        if (torch.cuda.is_available()):
            criterion = nn.MSELoss().cuda()
        params = []
        for key, value in model.named_parameters():
            if value.requires_grad != False:
                params.append({'params': value, 'lr': config.base_lr})

        # optimizer = torch.optim.SGD(params, config.base_lr, momentum=config.momentum,
        #                             weight_decay=config.weight_decay)
        optimizer = torch.optim.Adam(params,
                                     lr=config.base_lr,
                                     betas=(0.9, 0.99),
                                     weight_decay=config.weight_decay)
        # model.train() # only for bn and dropout
        model.eval()

        # from matplotlib import pyplot as plt

        iters = 0
        batch_time = util.AverageMeter()
        data_time = util.AverageMeter()
        losses = util.AverageMeter()
        losses_list = [util.AverageMeter() for i in range(12)]
        end = time.time()

        heat_weight = 48 * 48 * (
            classNumList[idx] +
            1) / 2.0  # for convenient to compare with origin code
        # heat_weight = 1

        while iters < config.max_iter:
            # `input` is the image batch, `heatmap` the target network output
            for i, (input, heatmap) in enumerate(train_loader):
                learning_rate = util.adjust_learning_rate(optimizer, iters, config.base_lr, policy=config.lr_policy, \
                                                          policy_parameter=config.policy_parameter)
                data_time.update(time.time() - end)
                if (torch.cuda.is_available()):
                    input = input.cuda(async=True)
                    heatmap = heatmap.cuda(async=True)
                input_var = torch.autograd.Variable(input)
                heatmap_var = torch.autograd.Variable(heatmap)
                # wrap the images as Variables and feed them through the model
                heat = model(input_var)

                # feat = C4.cpu().data.numpy()
                # for n in range(100):
                #     plt.subplot(10, 10, n + 1);
                #     plt.imshow(feat[0, n, :, :], cmap='gray')
                #     plt.xticks([]);
                #     plt.yticks([])
                # plt.show()

                loss1 = criterion(heat, heatmap_var) * heat_weight
                # loss2 = criterion(heat4, heatmap_var) * heat_weight
                # loss3 = criterion(heat5, heatmap_var) * heat_weight
                # loss4 = criterion(heat6, heatmap_var) * heat_weight
                # loss5 = criterion(heat, heatmap_var)
                # loss6 = criterion(heat, heatmap_var)

                loss = loss1  # + loss2 + loss3# + loss4# + loss5 + loss6
                losses.update(loss.data[0], input.size(0))
                loss_list = [loss1
                             ]  # , loss2, loss3]# , loss4 ]# , loss5 , loss6]
                for cnt, l in enumerate(loss_list):
                    losses_list[cnt].update(l.data[0], input.size(0))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                batch_time.update(time.time() - end)
                end = time.time()

                iters += 1
                if iters % config.display == 0:
                    print(
                        'Train Iteration: {0}\t'
                        'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                        'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                        'Learning rate = {2}\n'
                        'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.
                        format(iters,
                               config.display,
                               learning_rate,
                               batch_time=batch_time,
                               data_time=data_time,
                               loss=losses))
                    for cnt in range(0, 1):
                        print(
                            'Loss{0}_1 = {loss1.val:.8f} (ave = {loss1.avg:.8f})'
                            .format(cnt + 1, loss1=losses_list[cnt]))
                    print(
                        time.strftime(
                            '%Y-%m-%d %H:%M:%S -----------------------------------------------------------------------------------------------------------------\n',
                            time.localtime()))

                    # reset the running meters after each display window
                    batch_time.reset()
                    data_time.reset()
                    losses.reset()
                    for cnt in range(12):
                        losses_list[cnt].reset()

                # periodic checkpoint plus a loss log line
                if iters % 1000 == 0:
                    torch.save(
                        {
                            'iter': iters,
                            'state_dict': model.state_dict(),
                        }, modelSaveList[idx] + str(iters) + 'res50.pth.tar')
                    with open('./logLoss2.txt', 'a') as f:
                        f.write(
                            'Train Iteration: {0}\t'
                            'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                            'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                            'Learning rate = {2}\n'
                            'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.
                            format(iters,
                                   config.display,
                                   learning_rate,
                                   batch_time=batch_time,
                                   data_time=data_time,
                                   loss=losses) + '\n')

                if iters == config.max_iter:
                    break

    return
예제 #19
0
파일: kyeong.py 프로젝트: sio4/honcheonui
	if message.get('abort', False) and message['abort']['flag']:
		log.error('service aborted by %s.' % message['abort']['owner'])
		log.error('-- reason: %s.' % message['abort']['reason'])
		exit(0)
	return

###
### start main routine	------------------------------------------------------
###
options = get_options()

log = util.Log('hcu-%s' % NAME)
log.info("initializing...")

# Load the configuration; a config error is fatal at startup.
try:
	cf = util.Config(options.config)
except util.configError as e:
	log.fatal('%s' % e)

# override some configuration (agent mode).
cf.set('honcheonui/name', 'honcheonui-%s' % NAME)
cf.set('honcheonui/version', VERSION)

# Apply the configured log level and confirm configuration.
log.set_level(cf.get('honcheonui/loglevel'))
log.info('%s configured properly...' % cf.get('honcheonui/name'))

# FIXME reachable test required!
if cf.get('master/host') == '':
	log.fatal("server not configured properly.", os.EX_CONFIG)
### go background!	------------------------------------------------------
예제 #20
0
파일: git.py 프로젝트: ryuch91/algo-crawler
		contents = repo.get_contents(file_name)
		repo.update_file(contents.path, commit_msg, input_text, contents.sha, branch=branch_name)
		

# token을 통해 github 연결체를 만들고 해당 repo에서 특정 content를 읽어옴
# Connects to GitHub with the stored token and commits new README contents.
def updateMDFile(inputText):
	"""Commit ``inputText`` as the new README.md of the AlgoBoard repo.

	Returns the ContentFile of the README as it was *before* the update
	(the object whose sha was used for the commit).
	"""
	token = util.readGitToken()
	client = Github(token)

	board_repo = client.get_repo("yhchoi0225/AlgoBoard")
	readme = board_repo.get_contents("README.md")

	# First string is the commit message; inputText is the new file body.
	board_repo.update_file(readme.path, "README update by auto crawler", inputText, readme.sha, branch="updateStatus")

	return readme

def readMDFile():
	"""Fetch and return the current README.md ContentFile of the AlgoBoard repo."""
	token = util.readGitToken()
	client = Github(token)
	board_repo = client.get_repo("yhchoi0225/AlgoBoard")
	readme = board_repo.get_contents("README.md")
	return readme

if __name__=='__main__':
	# Smoke test: read the token from secure.conf, connect to GitHub and
	# dump the decoded README of the AlgoBoard repository.
	config = util.Config('./secure.conf')
	git_connector = GitConnector(config.getGitToken())
	git_decoded_contents = git_connector.getDecodedContents('yhchoi0225/AlgoBoard', 'README.md')
	print(git_decoded_contents)
예제 #21
0
 def __init__(self):
     """Set up a class-named logger and the shared config; defer connecting."""
     self.log = logging.getLogger(self.__class__.__name__)
     self._config = util.Config()
     # Connection is created lazily, not in the constructor.
     self._conn = None
예제 #22
0

#########################################################################################################################

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        type=str,
        default='config.json',
        help='Experiment configuration file path (json format).')
    parser.add_argument('--blind',
                        action='store_true',
                        help='Predict based in the output of the network')
    FLAGS, unparsed = parser.parse_known_args()
    config = ut.Config()
    config.load_from_file(FLAGS.config)
    reader = config.data_reader_class()
    data_dict = reader.load_data(config.data_folder)
    layer_in = len(config.x_features)
    layer_out = len(config.y_features)
    decoded = decode_solution(config.solution, layer_in, layer_out)
    model_name = '-'.join(map(str, decoded['rnn_arch'])) + '.'
    model_name = model_name + str(decoded['look_back']) + '.'
    model_name = model_name + str(decoded['drop_out'])
    model_file = config.models_folder + model_name + '.hdf5'
    # verify if the model already exists
    if not os.path.isfile(model_file):
        trainer = nn.BPTrainRNN(rnn_arch=decoded['rnn_arch'],
                                drop_out=decoded['drop_out'],
                                model_file=model_file,
예제 #23
0
import network

import util

# Auto-connect the WLAN station interface from the saved 'network' config.
conf = util.Config('network')
if 'sta_if' in conf:
    sta_if = network.WLAN(network.STA_IF)
    sta_if.active(True)
    sta_conf = conf['sta_if']
    if 'ssid' in sta_conf:
        # Password is optional; open networks store no 'password' key.
        pw = sta_conf['password'] if 'password' in sta_conf else None
        sta_if.connect(sta_conf['ssid'], pw)
        del pw
    del sta_conf
# Drop temporaries eagerly — MicroPython boot scripts keep RAM tight.
del conf