Example #1
def main():

    netconfig, hyperparams_config, data_config = config()
    model = SELUNet(dropout=netconfig["alphadropout"])
    model = load_model(model, netconfig["model_resume_file"])

    dump_config(model, netconfig, hyperparams_config, data_config)
    # copy_files(netconfig["model_dir"])

    train_data, validate_data, test_data = create_train_validate_test_dataloader(
        netconfig)

    saver = Saver(netconfig["save_dir"])
    use_cuda = netconfig["use_cuda"]
    writer = SummaryWriter(netconfig["writer_dir"])
    tee = Tee(netconfig["tee_file"])

    optimizer = optim.Adamax(model.parameters())
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          netconfig["lr_decay_epochs"],
                                          netconfig["lr_decay"])
    loss = construct_loss(netconfig)

    finalconfig = {
        "train_data": train_data,
        "test_data": validate_data,
        "model": model,
        "saver": saver,
        "use_cuda": use_cuda,
        "epochs": netconfig["epochs"],
        "optimizer": optimizer,
        "scheduler": scheduler,
        "loss": loss,
        "writer": writer,
        "tee": tee,
        "model_dir": netconfig["model_dir"],
        "test_interval": netconfig["test_interval"],
        "window": netconfig["window"],
        "stride": netconfig["stride"],
        "idr_interval": netconfig["idr_interval"],
    }

    train_test_loop(finalconfig)

    if test_data:
        metrics = test(model, loss, test_data, use_cuda)

        tee.writeln("Test: " + " ".join(("{: >5}: {:.4f}".format(k, v)
                                         for k, v in metrics.items())))
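
Note: train_test_loop itself does not appear on this page. A minimal sketch consistent with the finalconfig keys above might look like the following; the train helper and the saver.save signature are assumptions, not the actual project code.

def train_test_loop(cfg):
    # Hypothetical skeleton: one pass per epoch over cfg["train_data"],
    # stepping the LR scheduler and evaluating every test_interval epochs.
    model, loss = cfg["model"], cfg["loss"]
    for epoch in range(cfg["epochs"]):
        train(model, loss, cfg["train_data"], cfg["optimizer"],
              cfg["use_cuda"])
        cfg["scheduler"].step()
        if (epoch + 1) % cfg["test_interval"] == 0:
            metrics = test(model, loss, cfg["test_data"], cfg["use_cuda"])
            cfg["saver"].save(model, epoch)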
Example #2
    def send_worker(address, send_queue, worker_alive):
        timing = AccumDict()
        log = Tee('/tmp/send_worker.log')

        ctx = SerializingContext()
        sender = ctx.socket(zmq.PUSH)
        sender.connect(address)

        log(f"Sending to {address}")

        try:
            while worker_alive.value:
                tt = TicToc()

                try:
                    msg = send_queue.get(timeout=GET_TIMEOUT)
                except queue.Empty:
                    continue

                tt.tic()
                sender.send_data(*msg)
                timing.add('SEND', tt.toc())

                if opt.verbose:
                    Once(timing, log, per=1)
        except KeyboardInterrupt:
            log("send_worker: user interrupt")
        finally:
            worker_alive.value = 0

        sender.disconnect(address)
        sender.close()
        ctx.destroy()
        log("send_worker exit")
Example #3
def main():
    stdout = sys.stdout
    f = open('stdout.txt', 'w')
    sys.stdout = Tee(sys.stdout, f)
    classifiers = [
        (ScikitNeuralNetClassifier, [[0, 10, 0]]),
        (ScikitNeuralNetClassifier, [[0, 15, 0]]),
        (ScikitNeuralNetClassifier, [[0, 30, 0]]),
        (ScikitNeuralNetClassifier, [[0, 50, 0]]),
        (ScikitNeuralNetClassifier, [[0, 100, 0]]),
        (ScikitNaiveBayesClassifier, []),
        (NearestNeighborsClassifier, [5]),
        (NearestNeighborsClassifier, [10]),
        (NearestNeighborsClassifier, [15]),
    ]
    features = [
        FaceLandmarkFeatureConverter,
        FaceBoundaryFeatureConverter,
        FaceLandmarkBoundaryFeatureConverter,
    ]
    age_groups = [
        (12, 23, 100),
        (7, 15, 22, 100),
        (5, 12, 18, 100),
        (5, 10, 15, 20, 25, 100),
    ]

    for cls, param in classifiers:
        for ftr in features:
            for ag in age_groups:
                print "Classifier:", cls
                print "Classifier param:", param
                print "Feature:", ftr
                print "Age Group:", ag
                res = run(cls, [ftr(AgeBucket(*ag))] + param)
                print "Avg. Correct:", res[0]
                print "Avg. Accuracy:", res[1] * 100, "%"
                print "BTRG:", res[2], "times"
                sys.stdout.flush()
    sys.stdout = stdout
    f.close()
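
Note: the Tee class used in these examples is imported from each project's utils module and is not shown on this page. A minimal sketch compatible with the sys.stdout = Tee(sys.stdout, f) call above (hypothetical, not the exact class from any of these repos):

class Tee:
    # Duplicate every write to all wrapped streams (e.g. the real
    # stdout plus a log file), so print() output lands in both.
    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)

    def flush(self):
        for stream in self.streams:
            stream.flush()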
Example #4
    def recv_worker(port, recv_queue, worker_alive):
        timing = AccumDict()
        log = Tee('./logs/recv_worker.log')

        ctx = SerializingContext()
        socket = ctx.socket(zmq.PULL)
        socket.bind(f"tcp://*:{port}")
        socket.RCVTIMEO = RECV_TIMEOUT

        log(f'Receiving on port {port}')

        try:
            while worker_alive.value:
                tt = TicToc()

                try:
                    tt.tic()
                    msg = socket.recv_data()
                    timing.add('RECV', tt.toc())
                except zmq.error.Again:
                    log("recv timeout")
                    continue

                #log('recv', msg[0])

                method, data = msg
                if method['critical']:
                    recv_queue.put(msg)
                else:
                    try:
                        recv_queue.put(msg, block=False)
                    except queue.Full:
                        log('recv_queue full')

                Once(timing, log, per=1)
        except KeyboardInterrupt:
            log("recv_worker: user interrupt")

        worker_alive.value = 0
        log("recv_worker exit")
Example #5
    def __init__(self, *args, in_addr=None, out_addr=None, **kwargs):
        self.in_addr = in_addr
        self.out_addr = out_addr
        self.predictor_args = (args, kwargs)
        self.timing = AccumDict()
        self.log = Tee('/tmp/predictor_remote.log')

        self.send_queue = mp.Queue(QUEUE_SIZE)
        self.recv_queue = mp.Queue(QUEUE_SIZE)

        self.worker_alive = mp.Value('i', 0)

        self.send_process = mp.Process(target=self.send_worker,
                                       args=(self.in_addr, self.send_queue,
                                             self.worker_alive),
                                       name="send_process")
        self.recv_process = mp.Process(target=self.recv_worker,
                                       args=(self.out_addr, self.recv_queue,
                                             self.worker_alive),
                                       name="recv_process")

        self._i_msg = -1
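
Note: only __init__ is shown above. Hypothetical start/stop companions, consistent with the shared worker_alive flag and the two processes (the class name is inferred from the log path and, like the method names, is an assumption):

class PredictorRemote:  # fragment; assumes the __init__ shown above
    def start(self):
        self.worker_alive.value = 1
        self.send_process.start()
        self.recv_process.start()

    def stop(self):
        self.worker_alive.value = 0
        self.send_process.join()
        self.recv_process.join()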
Example #6
    def recv_worker(address, recv_queue, worker_alive):
        timing = AccumDict()
        log = Tee('/tmp/recv_worker.log')

        ctx = SerializingContext()
        receiver = ctx.socket(zmq.PULL)
        receiver.connect(address)
        receiver.RCVTIMEO = RECV_TIMEOUT

        log(f"Receiving from {address}")

        try:
            while worker_alive.value:
                tt = TicToc()

                try:
                    tt.tic()
                    msg = receiver.recv_data()
                    timing.add('RECV', tt.toc())
                except zmq.error.Again:
                    continue

                try:
                    recv_queue.put(msg, timeout=PUT_TIMEOUT)
                except queue.Full:
                    log('recv_queue full')
                    continue

                if opt.verbose:
                    Once(timing, log, per=1)
        except KeyboardInterrupt:
            log("recv_worker: user interrupt")
        finally:
            worker_alive.value = 0

        receiver.disconnect(address)
        receiver.close()
        ctx.destroy()
        log("recv_worker exit")
Example #7
    def send_worker(port, send_queue, worker_alive):
        timing = AccumDict()
        log = Tee('./logs/send_worker.log')

        ctx = SerializingContext()
        socket = ctx.socket(zmq.PUSH)
        socket.bind(f"tcp://*:{port}")

        log(f'Sending on port {port}')

        try:
            while worker_alive.value:
                tt = TicToc()

                try:
                    method, data = send_queue.get(timeout=GET_TIMEOUT)
                except queue.Empty:
                    log("send queue empty")
                    continue

                # get the latest non-critical request from the queue
                # don't skip critical request
                while not send_queue.empty() and not method['critical']:
                    log(f"skip {method}")
                    method, data = send_queue.get()

                log("sending", method)

                tt.tic()
                socket.send_data(method, data)
                timing.add('SEND', tt.toc())

                Once(timing, log, per=1)
        except KeyboardInterrupt:
            log("predictor_worker: user interrupt")

        worker_alive.value = 0
        log("send_worker exit")
Example #8
database_path = args.database_path
saved_models_path = args.saved_models_path
experiment_name = args.experiment_name
num_workers = args.num_workers
resume_epoch = args.resume_epoch
batch_size = args.batch_size
max_views = args.max_views
lr = args.LR
weight_decay = args.weight_decay
num_epochs = args.num_epochs

# save log
os.makedirs(os.path.join(saved_models_path, experiment_name), exist_ok=True)
f = open(os.path.join(saved_models_path, experiment_name, 'log.txt'), 'a')
sys.stdout = Tee(sys.stdout, f)

import dataset

train_transform = transforms.Compose([
    transforms.RandomCrop((128, 128)),
    transforms.ToTensor(),
])

val_transform = transforms.Compose([
    transforms.RandomCrop((128, 128)),
    transforms.ToTensor(),
])

if 'train_set' not in locals():
    print('Reading image info from disk...')
Example #9
from models_kp import KeyPointNet
from models_dy import DynaNetGNN, HLoss
from utils import rand_int, count_parameters, Tee, AverageMeter, get_lr, to_np, set_seed

args = gen_args()
set_seed(args.random_seed)

torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)

os.system('mkdir -p ' + args.outf_kp)
os.system('mkdir -p ' + args.dataf)

if args.stage == 'dy':
    os.system('mkdir -p ' + args.outf_dy)
    tee = Tee(os.path.join(args.outf_dy, 'train.log'), 'w')
else:
    raise AssertionError("Unsupported stage %s" % args.stage)

print(args)

# generate data
trans_to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

## Loading datasets here, switch to our dataset

if args.env == "half-cheetah":
    dataset_name = "halfcheetah-bullet-mixed-v0"
Example #10
    args.des_dir += '_noEdgeSuperv'
if args.pn:
    args.outf += '_pn'
args.outf += '_pstep_' + str(args.pstep)
# args.dataf = args.dataf + '_' + args.env
args.evalf = args.evalf + '_' + args.env
if args.use_attr == 0:
    args.evalf += '_noAttr'
if args.edge_superv == 0:
    args.evalf += '_noEdgeSuperv'

os.system('mkdir -p ' + args.evalf)
os.system('mkdir -p ' + args.des_dir)

# setup recorder
tee = Tee(os.path.join(args.des_dir, 'eval.log'), 'w')
print(args)

use_gpu = torch.cuda.is_available()

# define interaction network
model = PropagationNetwork(args, residual=True, use_gpu=use_gpu)
print("model #params: %d" % count_parameters(model))

if args.epoch == 0 and args.iter == 0:
    model_path = os.path.join(args.outf, 'net_best.pth')
else:
    model_path = os.path.join(
        args.outf, 'net_epoch_%d_iter_%d.pth' % (args.epoch, args.iter))

print("Loading saved ckp from %s" % model_path)
Example #11
loader = pil_loader

trans_to_tensor = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])


'''
store results
'''
os.system('mkdir -p ' + args.evalf)

log_path = os.path.join(args.evalf, 'log.txt')
tee = Tee(log_path, 'w')


def evaluate(roll_idx, video=True, image=True):

    eval_path = os.path.join(args.evalf, str(roll_idx))

    n_split = 4
    split = 4

    if image:
        os.system('mkdir -p ' + eval_path)
        print('Save images to %s' % eval_path)

    if video:
        video_path = eval_path + '.avi'
Example #12
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--lr_decay', default=50, type=int,
                    help='lr decay frequency')
parser.add_argument('--crop_size', default=256, type=int,
                    help='size of cropped image')
parser.add_argument('--visualize', dest='visualize', action='store_true',
                    help='visualize middle output')
parser.add_argument('--nparts', default=15, type=int,
                    help='number of keypoints')

best_prec1 = 0
time_string = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')

cine('logs')
Tee('logs/cmd_log_{}'.format(time_string), 'w')

unisize = 256
outsize = 64

def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    global fig, ax1, ax2, ax3, ax4
    if args.visualize:
        plt.ion()
        plt.show()
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2)
Example #13
from utils import load_data, Tee
import torch.nn.functional as F

lr = 1e-2
momentum = 0.9
n_epochs = 50
n_classes = 10
model_v = "reg1-1"

root_path = "/opt/tiger/vehiclereid/result/update"
save_path = os.path.join(root_path, "result_models/{}".format(model_v))
log_path = os.path.join(root_path, "logs/{}".format(model_v))
os.makedirs(save_path, exist_ok=True)
os.makedirs(log_path, exist_ok=True)
log_file = "train_scnn_{}.txt".format(model_v)
logger = Tee(os.path.join(log_path, log_file), 'w')

os.environ["CUDA_VISIBLE_DEVICES"] = "3"


def eval_ece(net, dataloader, n_bins=10):
    net.eval()
    bin_boundaries = torch.linspace(0, 1, n_bins + 1)
    bin_lowers = bin_boundaries[:-1]
    bin_uppers = bin_boundaries[1:]

    acc_lst, conf_lst = [], []

    #noise = noise_vector.to(device)

    with torch.no_grad():
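
Note: eval_ece is cut off right after entering torch.no_grad(). A hedged sketch of the standard expected-calibration-error reduction that usually follows this kind of binning setup (the helper name and exact reduction are assumptions):

import torch

def _ece_from_bins(confs, accs, bin_lowers, bin_uppers):
    # Weight each bin's |accuracy - confidence| gap by the fraction
    # of samples whose confidence falls inside the bin.
    ece = torch.zeros(1)
    for lo, hi in zip(bin_lowers, bin_uppers):
        in_bin = (confs > lo) & (confs <= hi)
        prop_in_bin = in_bin.float().mean()
        if prop_in_bin.item() > 0:
            acc_in_bin = accs[in_bin].float().mean()
            conf_in_bin = confs[in_bin].mean()
            ece += (conf_in_bin - acc_in_bin).abs() * prop_in_bin
    return ece.item()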
Example #14
    def predictor_worker(recv_queue, send_queue, worker_alive):
        predictor = None
        predictor_args = ()
        timing = AccumDict()
        log = Tee('./logs/predictor_worker.log')

        try:
            while worker_alive.value:
                tt = TicToc()

                try:
                    method, data = recv_queue.get(timeout=GET_TIMEOUT)
                except queue.Empty:
                    continue

                # get the latest non-critical request from the queue
                # don't skip critical request
                while not recv_queue.empty() and not method['critical']:
                    log(f"skip {method}")
                    method, data = recv_queue.get()

                log("working on", method)

                try:
                    tt.tic()
                    if method['name'] == 'predict':
                        image = cv2.imdecode(
                            np.frombuffer(data, dtype='uint8'), -1)
                    else:
                        args = msgpack.unpackb(data)
                    timing.add('UNPACK', tt.toc())
                except ValueError:
                    log("Invalid Message")
                    continue

                tt.tic()

                if method['name'] == "hello":
                    result = "OK"
                elif method['name'] == "__init__":
                    if args == predictor_args:
                        log("Same config as before... reusing previous predictor"
                            )
                    else:
                        del predictor
                        predictor_args = args
                        predictor = PredictorLocal(*predictor_args[0],
                                                   **predictor_args[1])
                        log("Initialized predictor with:", predictor_args)
                    result = True
                    tt.tic()  # don't account for init
                elif method['name'] == 'predict':
                    assert predictor is not None, "Predictor was not initialized"
                    result = getattr(predictor, method['name'])(image)
                else:
                    assert predictor is not None, "Predictor was not initialized"
                    result = getattr(predictor, method['name'])(*args[0],
                                                                **args[1])
                timing.add('CALL', tt.toc())

                tt.tic()
                if method['name'] == 'predict':
                    assert isinstance(
                        result, np.ndarray
                    ), f'Expected np.ndarray, got {result.__class__}'
                    ret_code, data_send = cv2.imencode(
                        ".jpg", result,
                        [int(cv2.IMWRITE_JPEG_QUALITY), opt.jpg_quality])
                else:
                    data_send = msgpack.packb(result)
                timing.add('PACK', tt.toc())

                if method['critical']:
                    send_queue.put((method, data_send))
                else:
                    try:
                        send_queue.put((method, data_send), block=False)
                    except queue.Full:
                        log("send_queue full")

                Once(timing, log, per=1)
        except KeyboardInterrupt:
            log("predictor_worker: user interrupt")
        except Exception as e:
            log("predictor_worker error:", e)
            traceback.print_exc()

        worker_alive.value = 0
        log("predictor_worker exit")
Example #15
                "model": model.state_dict(),
                "args": args.__dict__
            }
            torch.save(
                state,
                os.path.join(args.output_dir, args.expID + "_checkpoint.pth"))

    # Plot training curves
    plotter = Plotter(os.path.join(args.output_dir, args.expID))
    plotter.plot_training(train_loss, val_loss, val_acc)


if __name__ == '__main__':
    args = parse()
    os.makedirs(args.output_dir, exist_ok=True)
    sys.stdout = Tee(os.path.join(args.output_dir, args.expID + "_stdout.txt"),
                     "w")

    print('SETUP.')
    print(args.__dict__)

    # Seeding
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    # Cuda
    args.device = torch.device('cpu')
    args.kwargs = {}
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda:0')
        torch.backends.cudnn.benchmark = True
Example #16
from config import gen_args
from data import load_data, get_scene_info, normalize_scene_param
from data import get_env_group, prepare_input, denormalize
from models import Model
from utils import add_log, convert_groups_to_colors
from utils import create_instance_colors, set_seed, Tee, count_parameters

import matplotlib.pyplot as plt

args = gen_args()
set_seed(args.random_seed)

os.system('mkdir -p ' + args.evalf)
os.system('mkdir -p ' + os.path.join(args.evalf, 'render'))

tee = Tee(os.path.join(args.evalf, 'eval.log'), 'w')

### evaluating

data_names = args.data_names

use_gpu = torch.cuda.is_available()

# create model and load weights
model = Model(args, use_gpu)
print("model_kp #params: %d" % count_parameters(model))

if args.eval_epoch < 0:
    model_name = 'net_best.pth'
else:
    model_name = 'net_epoch_%d_iter_%d.pth' % (args.eval_epoch, args.eval_iter)
Example #17
if with_gui:
    gui = ti.GUI("MLS-MPM",
                 res=1024,
                 background_color=0x112F41,
                 show_gui=args.show)

if write_to_disk:
    for i in range(1000):
        output_dir = f'{args.out_dir}_{i:03d}'
        if not os.path.exists(output_dir):
            break
    os.makedirs(f'{output_dir}/particles')
    os.makedirs(f'{output_dir}/previews')
    print("Writing 2D vis and binary particle data to folder", output_dir)
    tee = Tee(fn=f'{output_dir}/log.txt', mode='w')
    print(args)
else:
    output_dir = None

# Use 512 for final simulation/render
R = args.res
thickness = 2

mpm = MPMSolver(res=(R, R, R),
                size=1,
                unbounded=True,
                dt_scale=1,
                quant=True,
                use_g2p2g=False,
                support_plasticity=True,
Example #18
    def duplicate_output_to_log(self):
        self.tee = Tee(self.get_ckpt_folder() + 'log.txt')
Example #19
np.random.seed(args.r_seed)
torch.manual_seed(args.r_seed)
torch.cuda.manual_seed_all(args.r_seed)


def _init_fn(worker_id):
    np.random.seed(args.r_seed + worker_id)


#####################
# PRINT SYSTEM INFO #
#####################

os.makedirs(f'logs/{args.run_name}', exist_ok=True)
log_f = Tee(f'logs/{args.run_name}/log.txt', 'a')

print("OS: ", sys.platform)
print("Python: ", sys.version)
print("PyTorch: ", torch.__version__)
print("Numpy: ", np.__version__)

USE_CUDA = not args.no_cuda
device = torch.device(
    "cuda:0" if USE_CUDA and torch.cuda.is_available() else "cpu")
print('Using device:', device)
if device.type == 'cuda':
    print(torch.cuda.get_device_name(0))
    print('Memory Usage:')
    print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1),
          'GB')
Example #20
import os
import sys
import predictor_worker

from arguments import opt
from utils import Tee

log = Tee('./logs/impersonator_server.log')

if __name__ == "__main__":
    log('Loading Predictor')
    predictor_args = {
        'config_path': opt.config,
        'checkpoint_path': opt.checkpoint,
        'relative': opt.relative,
        'adapt_movement_scale': opt.adapt_scale,
        'enc_downscale': opt.enc_downscale
    }
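    # note: predictor_args is built here, but this snippet passes only the ports to run_worker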

    predictor_worker.run_worker(opt.in_port, opt.out_port)
    sys.exit(0)