Example #1
0
def main():
    """Entry point: train with shared worker processes, or evaluate.

    ``args.mode`` selects the branch:
      * ``"train"`` -- build the environment and network, share the network
        across one test process plus ``args.n_workers`` training processes,
        then wait for all of them to finish.
      * ``"test"``  -- run ``evaluate`` only.
    """
    args = arg_parser()

    if args.mode == "train":
        env = environment.make(args.env, args)
        # Pick the architecture; fail fast on an unknown choice instead of
        # hitting an unbound-`nn` NameError at SharedAdam(...) below.
        if args.networks == "MLP":
            nn = MLP(env.observation_space.shape[0], env.action_space,
                     args.n_frames)
        elif args.networks == "CONV":
            nn = CONV(args.n_frames, env.action_space)
        else:
            raise ValueError("unknown network type: {!r}".format(args.networks))

        optimizer = SharedAdam(nn.parameters())

        # One process continuously evaluates the shared model while the
        # workers train it in parallel.
        threads = []
        thread = mp.Process(target=test, args=(args, nn))
        thread.start()
        threads.append(thread)

        for i in range(args.n_workers):
            thread = mp.Process(target=train, args=(i, args, nn, optimizer))
            thread.start()
            threads.append(thread)

        # Block until every process has exited.
        for thread in threads:
            thread.join()
    elif args.mode == "test":
        evaluate(args)
Example #2
0
def main():
    """Train a multi-agent DQN across several generated environments.

    For each of ``args.n_envs`` environments the trainer runs
    ``args.epochs`` epochs; every epoch rolls out up to ``args.steps``
    transitions into replay memory, and every 10th epoch performs one
    optimisation step on a sampled batch.
    """
    torch.manual_seed(SEED)
    args = arg_parser()

    dataset = MADataset(args.len_dataset, args.connectivity_path,
                        args.task_path, args.city_path, args.reward_path,
                        args.destination_path)
    loader = DataLoader(dataset)

    trainer = Trainer(args=args,
                      n_agents=args.n_agents,
                      n_cities=args.n_cities,
                      device=DEVICE,
                      data_loader=loader)

    replay = ReplayMemory(MEMORY_CAPACITY)

    banner = ("+----------------------------------------------+\n"
              "|               Environment: {0}                 |\n"
              "+----------------------------------------------+\n")

    for _ in range(args.n_envs):
        trainer.gen_env()
        print(banner.format(trainer.idx_env))
        for ep in tqdm(range(args.epochs)):
            reward_total = 0.0
            trainer.optimizer.zero_grad()

            # Roll out one episode, pushing transitions into replay memory.
            for _step in range(args.steps):
                transitions = trainer.step()
                if transitions == "done":
                    break
                reward_total += sum(t.reward for t in transitions)
                replay.push_all(transitions)
            # Episode finished (or truncated): reset for the next one.
            trainer.env.reset()

            # Optimise only every 10th epoch, from a sampled batch.
            if ep % 10 == 0:
                batch = replay.sample(args.batch_size)
                loss = trainer.calc_loss(batch)
                loss.backward()
                trainer.optimizer.step()
                print("\n Epoch: {0} reward: {1} loss: {2}\n".format(
                    ep, reward_total, loss.float()))
Example #3
0
    # Bound-method alias (micro-optimisation); currently unused because the
    # only call site below is commented out.
    data_list_append = data_list.append
    tag_set = set()
    for child in root:
        data = child.attrib
        #body = " ".join(re.sub(_regex, ' ', data['Body']).split())
        title = None
        if data.get('Title'):
            # Strip characters matched by _regex, then collapse whitespace.
            title = " ".join(re.sub(_regex, ' ', data['Title']).split())
        tags = None
        if data.get('Tags'):
            # Tags arrive as "<tag1><tag2>..."; drop the brackets and split.
            tags = re.sub('<|>', ' ', data['Tags']).split()
            tag_set = tag_set.union(set(tags))
        # NOTE(review): prefer `is None` over `== None`; also note that rows
        # missing a title still have their tags added to tag_set above.
        if title == None or tags == None:
            continue
        #data_list_append({'body': body, 'tags': tags, 'title': title})

    #f = open(outfile+'.json', 'w')
    #dumps = json.dumps(data_list, sort_keys=True, indent=2)
    #f.write(dumps)
    #f.close()

    # Write the collected tag set, one tag per line.
    # NOTE(review): `fl` is never closed -- consider a `with open(...)` block.
    fl = open(outfile + 'tags.txt', 'w')
    tag_list = list(tag_set)
    for item in tag_list:
        fl.write("%s\n" % item)


# Script entry point: parse the input/output paths and run the extraction.
if __name__ == '__main__':
    infile, outfile = arg_parser()
    main(infile, outfile)
Example #4
0
                # Non-blocking select: only writability of clients is checked
                # (timeout 0), so w is the set of sockets ready to send to.
                r, w, e = select.select([], clients, [], 0)
            except Exception as e:
                # Best-effort: ignore select errors (e.g. a just-closed socket).
                pass

        for s_client in w:
            srv_response = encode_json(response(200))
            try:
                data = s_client.recv(1024)
                print(decode_json(data))
                s_client.send(srv_response)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt --
                # prefer `except OSError`. A dropped/broken client is removed
                # from the pool here.
                clients.remove(s_client)


# Script entry point: resolve HOST/PORT from the command line, then serve.
if __name__ == '__main__':
    parser = arg_parser()
    namespace = parser.parse_args()

    # Precedence: explicit address (with its port) > port only > defaults.
    if namespace.addr:
        HOST, PORT = namespace.addr, namespace.port
    elif namespace.port:
        HOST, PORT = '', namespace.port
    else:
        HOST, PORT = '', 7777

    print('Server started')
    srv_loop()
Example #5
0
import matplotlib as plt  # NOTE(review): likely meant `matplotlib.pyplot as plt` -- confirm
import pdb
import os
import argparse
from tensorboardX import SummaryWriter
import time

from Networks import *
import utils
# from dropout import create_adversarial_dropout_mask, calculate_jacobians

_DIGIT_ROOT = '~/dataset/digits/'  # root directory of the digits datasets
_PREFIX = ''

# Build and run the CLI: utils.arg_parser registers the options on the parser.
opt = argparse.ArgumentParser()
opt = utils.arg_parser(opt)
opt = opt.parse_args()

### For Tensorboard
#   cur = time.time()
#   run_dir = "runs/{0}".format(curtime[0:19])

#   writer = SummaryWriter(run_dir)


# writer.add_image('generated', sum_img.view(3, 256, 512), epoch)
# writer.add_image('generated', sum_img.view(3, 256, 512), epoch)
#             self.writer.add_scalar('PA', PA, self.current_epoch)
#             self.writer.add_scalar('MPA', MPA, self.current_epoch)
#             self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
#             self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
Example #6
0
from utils import arg_parser
from grid_map_generator import GridMapGenerator

# Script entry point: generate a grid map from `src`, scaled by `scaler`.
if __name__ == "__main__":
    args = arg_parser()
    GridMapGenerator().generate(args.src, args.scaler)
Example #7
0
from data_loader import BSDSLoader
from models import HED, convert_vgg, weights_init
from functions import cross_entropy_loss  # sigmoid_cross_entropy_loss
from utils import Logger, Averagvalue, save_checkpoint, load_vgg16pretrain, arg_parser, tune_lrs

from scipy.io import savemat

root = '../..'  # repository root relative to this script
# Default hyper-parameters for HED training on the BSDS dataset.
args = arg_parser(dataset=join(root, 'HED-BSDS'),
                  batch_size=1,
                  lr=1e-6,
                  momentum=0.9,
                  weight_decay=2e-4,
                  stepsize=3,
                  gamma=0.1,
                  start_epoch=0,
                  maxepoch=10,
                  itersize=10,
                  print_freq=50,
                  gpu='1',
                  resume='',
                  tmp=join('tmp', 'HED'))

# Pretrained VGG16 backbone weights.
args.model_path = join(root, 'pretrained_models', 'vgg16.pth')

# Pin GPU numbering to PCI bus order so CUDA_VISIBLE_DEVICES is stable.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

# Scratch directory for checkpoints/logs, created below if missing.
THIS_DIR = abspath(dirname(__file__))
TMP_DIR = join(THIS_DIR, args.tmp)
if not isdir(TMP_DIR):
Example #8
0
"""

# Library imports
import time
import pandas as pd
import numpy as np

# Custom imports
import utils
import plot
import read_h5 as read
import preprocessing as pp
import neural_net as nn
import kmeans as km

# Parse command-line options (dataset size, initialisation flag, ...).
args = utils.arg_parser()

# Read data from h5 files into dataframe
###############################################################################
t_start = time.time()
df = read.h5_to_df('../../MillionSongSubset/data', args.size, args.initialize)
t_extract = time.time()
# Report the extraction time, rounded to 2 decimal places.
print('\nGot', len(df.index), 'songs in', round((t_extract - t_start), 2),
      'seconds.')

# Setup directory for preprocessing and model storage
###############################################################################
path = utils.setup_model_dir()

# Transform data into vectors for processing by neural network
###############################################################################