Example #1
def work_loop():
    global exit_flag
    opts = get_options()
    queries = cycle(QueryStorage(opts.domain_file))
    networks = cycle(SourceNetworks(opts.src_ips_file))
    qps = opts.qps
    ttr = opts.time_to_run

    timer = threading.Timer(ttr, wait)
    timer.start()

    interval = 1.0 / float(qps)

    try:
        for net, qd in izip(networks, queries):
            if exit_flag:
                break
            src = random_ip(net) if opts.spoof_ips else None
            query = prep_query(qd, opts.server_addr, src)
            send_query(query)

            sleep(interval)
    except:
        timer.cancel()
        raise
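A minimal sketch, assuming argparse, of the get_options() and wait() helpers this example relies on; they are not from the original project, and the flag names are only inferred from the attributes the loop reads (domain_file, src_ips_file, server_addr, qps, time_to_run, spoof_ips).

import argparse

exit_flag = False  # polled by work_loop(); set by the timer callback below

def get_options():
    # Hypothetical parser: every flag name here is an assumption.
    parser = argparse.ArgumentParser(description="DNS query load generator")
    parser.add_argument('--domain-file', dest='domain_file', required=True)
    parser.add_argument('--src-ips-file', dest='src_ips_file', required=True)
    parser.add_argument('--server-addr', dest='server_addr', required=True)
    parser.add_argument('--qps', type=int, default=10)
    parser.add_argument('--time-to-run', dest='time_to_run', type=float, default=60.0)
    parser.add_argument('--spoof-ips', dest='spoof_ips', action='store_true')
    return parser.parse_args()

def wait():
    # Timer callback: tell the send loop in work_loop() to stop.
    global exit_flag
    exit_flag = True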
Example #2
def test():
    from torchtext import data
    from datasets import build_bible_datasets
    from options import get_options

    bucket_iter_train, _ = build_bible_datasets()
    sample = next(iter(bucket_iter_train))
    merge_dim = 1
    en_sem, en_sty, decoder, adv_disc = build_models(bucket_iter_train.dataset,
                                                     get_options())
    in_var, in_len = sample.sent_0
    #print(in_var.shape, in_len.shape)  # torch.Size([32, 56 or 66]) torch.Size([32])

    # print ('length0',sample.sent_0[1])
    # print ('length1',sample.sent_1[1])
    sem_out = T(en_sem(sample.sent_0))
    print('result of en_sem', sem_out.shape)  # [1, 32, 20]
    sty_out = T(en_sty(sample.sent_1))
    #print('sty_out', sty_out.shape, 'concat', T(torch.cat([sty_out, sty_out], dim=merge_dim)).shape)

    merged = T(torch.cat([sem_out, sty_out], dim=merge_dim))
    disc_out = T(adv_disc(merged))
    merged.unsqueeze_(0)

    #print('merged2', merged.shape)
    decoder_outputs, _, _ = decoder(
        inputs=None,  # pass a non-None tensor for teacher forcing: (batch, seq_len, input_size)
        encoder_hidden=merged,  # (num_layers * num_directions, batch_size, hidden_size)
        encoder_outputs=None,  # pass non-None encoder outputs to enable attention
        teacher_forcing_ratio=0  # range 0..1; inputs must be given if > 0
    )
    decoder_outputs = decoder_outputs
Example #3
def graph(G, coord, paths, data, color):
    colors = set_colors(color)
    graph = set_graph(G, paths, colors)
    interval = 8 if (data['vertices'] >= 1000) else 500

    # set plots
    fig, ax = plt.subplots(figsize=(23, 13))
    options = get_options(G, data['vertices'], ax, coord)
    ax.set_facecolor(colors['background'])
    plt.gca().invert_yaxis()
    #add paths tools
    tm = fig.canvas.manager.toolmanager
    tb = fig.canvas.manager.toolbar
    #create tools
    tm.add_tool('Shortest', Shortest, graph=graph, options=options)
    tm.add_tool('Optimum', Optimum, graph=graph, options=options)
    tm.add_tool('Ants',
                Ants,
                fig=fig,
                interval=interval,
                graph=graph,
                options=options)
    #add tools to navigation bar
    tb.add_tool('Shortest', 'Navigation')
    tb.add_tool('Optimum', 'Navigation')
    tb.add_tool('Ants', 'Navigation')
    #remove unused tools
    tm.remove_tool('subplots')
    #draw base graph
    draw_graph(0, G, options, None, None, colors)
    fig.tight_layout()
    plt.show()
Example #4
def main():
    opts = get_options()
    anum = opts.anum
    cnum = opts.cnum
    batch_size = opts.batch_size
    maxiter = opts.iteration
    device = torch.device(opts.cuda)
    lr = opts.lr
    clip = opts.clip
    clip_argv = opts.clip_norm
    trainIns = opts.trainIns
    modelpath = opts.modelpath

    if not os.path.exists(modelpath):
        os.mkdir(modelpath)

    tsp = TrainModleMTSP(_modelpath=modelpath,
                         anum=anum,
                         cnum=cnum,
                         _device=device,
                         clip=clip,
                         clip_argv=clip_argv,
                         lr=lr,
                         train_instance=trainIns)
    tsp.eval(batch_size=batch_size)
    tsp.train_without_baseline(maxiter=maxiter, batch_size=batch_size)
Example #5
def widget_detail(request, widgetsettings_id):
    """Widget details page where user can set options defined for the widget
    """
    widget = WidgetSettings.objects.get(pk=widgetsettings_id)
    opts = widget.get_widget().options(widget)
    
    if request.method == "POST":
        form = options_form(opts)(request.POST)
        if form.is_valid():
            for field in form.cleaned_data:
                set_option(field, form.cleaned_data[field])
    
    if opts:
        defaults = [opt.get('default') for opt in opts.values()]
        initial = get_options(opts.keys(), defaults)
        form = options_form(opts)(initial=initial)
    else:
        form = None
    
    extra_context = {
        'options_form': form 
    }
    queryset = WidgetSettings.objects.all()
    return object_detail(request, queryset, widgetsettings_id,
                         template_name="plugins/widget_detail.html",
                         extra_context=extra_context)
Example #6
def receive_message():
    print('Method: ' + str(request.method))
    if request.method == 'GET':
        """Before allowing people to message your bot, Facebook has implemented a verify token
        that confirms all requests that your bot receives came from Facebook."""
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    #if the request was not get, it must be POST and we can just proceed with sending a message back to user
    else:
        # get whatever message a user sent the bot
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                msg = message.get('message')
                if msg:
                    #Facebook Messenger ID for user so we know where to send response back to
                    sender_id = message['sender']['id']
                    print('The sender id is: ')
                    print(sender_id)
                    print('The message received is: ')
                    print(msg)
                    msg_text = msg.get('text')
                    if msg_text:
                        if msg_text.startswith('#start-translate'):
                            # set a flag in the database
                            # response_sent_text = get_message()
                            options.update_options(sender_id,
                                                   options.default_opts)
                            response_sent_text = ('Starting translation. All audio attachments '
                                                  'will now be converted from speech to text')

                            send_message(sender_id, response_sent_text)
                        elif msg_text.startswith('#options'):
                            opts = options.parse_options(msg_text)
                            options.update_options(sender_id, opts)
                            send_message(
                                sender_id, 'Translating FROM: %s, TO: %s' %
                                (opts['src'], opts['dest']))

                    #if user sends us a GIF, photo,video, or any other non-text item
                    attachments = msg.get('attachments')
                    if attachments:
                        attch = attachments[0]
                        attch_type = attch.get('type')
                        if attch_type == 'audio':
                            try:
                                opts = options.get_options(sender_id)
                            except Exception as e:
                                print(e)
                                opts = options.default_opts
                            url = attch['payload']['url']
                            src = opts['src']
                            dest = opts['dest']
                            response = a2t.convert_audio_from_url(url, src)
                            translator = Translator()
                            converted_response = \
                                translator.translate(response, dest=dest, src=src)
                            send_message(sender_id, converted_response.text)
    return "Message Processed"
Example #7
def receive_message():
    print('Method: ' + str(request.method))
    if request.method == 'GET':
        """Before allowing people to message your bot, Facebook has implemented a verify token
        that confirms all requests that your bot receives came from Facebook.""" 
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    #if the request was not get, it must be POST and we can just proceed with sending a message back to user
    else:
        # get whatever message a user sent the bot
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                msg = message.get('message')
                if msg:
                    #Facebook Messenger ID for user so we know where to send response back to
                    sender_id = message['sender']['id']
                    print('The sender id is: ')
                    print(sender_id)
                    print('The message received is: ')
                    print(msg)
                    msg_text = msg.get('text')
                    if msg_text:
                        if msg_text.startswith('#start-translate'):
                            # set a flag in the database
                            # response_sent_text = get_message()
                            options.update_options(sender_id, options.default_opts)
                            response_sent_text = ('Starting translation. All audio attachments '
                                                  'will now be converted from speech to text')
                            send_message(sender_id, response_sent_text)
                        elif msg_text.startswith('#options'):
                            opts = options.parse_options(msg_text)
                            options.update_options(sender_id, opts)
                            send_message(sender_id, 'Translating FROM: %s, TO: %s' % (opts['src'], opts['dest']))

                    #if user sends us a GIF, photo,video, or any other non-text item
                    attachments = msg.get('attachments')
                    if attachments:
                        attch = attachments[0]
                        attch_type = attch.get('type')
                        if attch_type == 'audio':
                            try:
                                opts = options.get_options(sender_id)
                            except Exception as e:
                                print(e)
                                opts = options.default_opts
                            url = attch['payload']['url']
                            src = opts['src']
                            dest = opts['dest']
                            response = a2t.convert_audio_from_url(url, src)
                            translator = Translator()
                            converted_response = \
                                translator.translate(response, dest=dest, src=src)
                            send_message(sender_id, converted_response.text)
    return "Message Processed"
Example #8
def main():
    out_dir = 'predict_to'
    in_dir = 'predict_from'
    gen_npz = 'pretrained/gen.npz'

    opt = get_options()

    gen = SPADEGenerator(opt)
    gen.to_gpu(0)
    chainer.serializers.load_npz(gen_npz, gen)
    gen.to_cpu()

    os.makedirs(in_dir, exist_ok=True)
    os.makedirs(out_dir, exist_ok=True)

    files = glob(in_dir + '/*.*')
    if len(files) == 0:
        print('Error: No files to load in \'' + in_dir + '\'.')
        return

    num = 0
    for filename in files:
        print(filename + ': ', end="")
        src_img = Image.open(filename).convert('RGB')
        if src_img is None:
            print('Not Loaded')
            continue

        print('Loaded')
        src_array = np.array(src_img, dtype='float32')
        src_array = src_array.transpose((2, 0, 1)) / 255

        x_array = src_array[:3, :, :256]
        c_array = src_array[:3, :, 256:512]

        x_onehot = label2onehot(x_array, threshold=0.4, skip_bg=True, dtype='float32')
        x = chainer.Variable(x_onehot[np.newaxis, :, :, :].astype('float32'))

        c_array = c_array * x_onehot[2]  # crop with hair label
        c = chainer.Variable(c_array[np.newaxis, :, :, :].astype('float32'))

        out = gen([x, c])

        x_array = np.transpose(x_array, (1, 2, 0))
        out_array = np.transpose((out.array[0] + 1) / 2, (1, 2, 0))

        img_array = np.concatenate((x_array, out_array), axis=1) * 255
        img = Image.fromarray(img_array.astype('uint8'))

        path = out_dir + '/' + str(num) + '.png'
        img.save(path)

        num += 1
Example #9
def main():

    args = options.get_options()
    print(args)

    #config training dataset
    test_transform = transforms.Compose([
        Resize((96, 96)),
        ToTensor(96),
        Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    testset = FaceLandmarksDataset(
        [
            {"root_dir": "/home/hanson/work/FaceLandmark_Pytorch/dataset/WFLW/images",
             "label_file": "/home/hanson/work/FaceLandmark_Pytorch/dataset/WFLW/landmark5p_label.txt"},
            {"root_dir": "/home/hanson/work/FaceLandmark_Pytorch/dataset/JD106/images",
             "label_file": "/home/hanson/work/FaceLandmark_Pytorch/dataset/JD106/landmark5p_label.txt"},
            {"root_dir": "/home/hanson/work/FaceLandmark_Pytorch/dataset/menpoTrain106/images",
             "label_file": "/home/hanson/work/FaceLandmark_Pytorch/dataset/menpoTrain106/landmark5p_label.txt"},
            {"root_dir": "/home/hanson/work/FaceLandmark_Pytorch/dataset/menpoTest106/images",
             "label_file": "/home/hanson/work/FaceLandmark_Pytorch/dataset/menpoTest106/landmark5p_label.txt"},
        ],
        point_num=5,
        transform=test_transform)

    testloader = data.DataLoader(testset, batch_size=200, num_workers=4)
    print("test image %d" % len(testset))

    #define training model
    model = resnet.inference(10).cuda()

    if len(args.snapshot) > 0:
        saved_state_dict = torch.load(args.snapshot)
        model.load_state_dict(saved_state_dict)

    criterion = nn.MSELoss().cuda()

    test_loss = eval(model, testloader, criterion)

    print(test_loss)
Example #10
def main():
   #print('used: %s' % info.get_used_space('/home'))
   (opts, args) = options.get_options()

   cosm.push(opts.key, opts.feed, get_data())
   if opts.oneshot is True:
      return

   try:
      while True:
         time.sleep(30 * 60)
         cosm.push(opts.key, opts.feed, get_data())
   except KeyboardInterrupt as err:
      pass
Example #11
def main(_):
    options = get_options()
    if options.run_mode == 'train':
        check_options(options)
        start_train(options, exp_mode=options.exp_mode)

    elif options.run_mode == 'eval_depth':
        start_eval_depth(options)

    elif options.run_mode == 'eval_pose':
        start_eval_pose(options)

    else:
        raise NotImplementedError(
            "must choose from ['train', 'eval_depth', 'eval_pose']")
Example #12
def main():

   # parse command options
   (opts, args) = options.get_options()

   # read user-provided bot config
   if not opts.config:
      raise Exception('config file name not defined')
   cfg = config.Config(opts.config)

   # execute
   bot = PussyBot(cfg, opts.password)
   if bot.connect():
      bot.process(block=True)
   else:
      print('Unable to connect')
Example #13
    def test_init(self):
        np.random.seed(1)

        data_manager = DataManager()
        place_cells = PlaceCells()
        hd_cells = HDCells()
        data_manager.prepare(place_cells, hd_cells)

        sequence_length = 100

        model = Model(place_cell_size=place_cells.cell_size,
                      hd_cell_size=hd_cells.cell_size,
                      sequence_length=sequence_length)

        flags = get_options()
        trainer = Trainer(data_manager, model, flags)
Example #14
def main(args=None):
    opt = get_options(args)
    if opt.mode == 'save':
        raise ValueError(
            'In video processing, "mode" should be "exec" or "debug".')

    print("Loading '{}'".format(opt.input))
    count = 1
    # Load the video
    if opt.realtime:
        cap = cv2.VideoCapture(int(opt.input))
        frames = 0
    else:
        cap, origin_fps, frames, width, height = load_video(opt.input)
    if opt.mode != 'debug' and not opt.realtime:
        writer = video_writer(opt.output_path, origin_fps, width, height,
                              opt.resize_factor)
    model = GANonymizer(opt)

    while (cap.isOpened()):
        print('')
        ret, frame = cap.read()
        if ret:
            print('-----------------------------------------------------')
            print('[INFO] Count: {}/{}'.format(count, frames))

            # process
            input, output = model(F.to_pil_image(frame))
            input, output = np.array(input), np.array(output)
            concat = np.concatenate([input, output], axis=0)
            if opt.mode != 'debug':
                if opt.realtime:
                    cv2.imshow('frame', concat)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    writer.write(concat)
            count += 1
        else:
            break

    # Stop video process
    cap.release()
    if opt.mode != 'debug' and not opt.realtime:
        writer.release()
    cv2.destroyAllWindows()
Example #15
def prep_query(params, dst, src):
    opts = get_options()
    res = None
    typ = 'A'
    if src is not None and opts.spoof_ips:
        res = IP(dst=dst,
                 src=src)
    else:
        res = IP(dst=dst)
    if isinstance(params.type, A):
        typ = 'A'
    elif isinstance(params.type, AAAA):
        typ = 'AAAA'
    elif isinstance(params.type, MX):
        typ = 'MX'
    res = res / UDP() / DNS(rd=1, qd=DNSQR(qname=str(params.domain), qtype=typ))

    return res
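Example #15 builds the packet with scapy. A minimal sketch, under the assumption (not confirmed by the source) that the send_query() helper used in Example #1 simply hands the crafted packet to scapy's send():

from scapy.all import send

def send_query(packet):
    # verbose=False keeps scapy quiet inside the tight QPS loop
    send(packet, verbose=False)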
Example #16
def main():
    ############get command line arguments############
    opts = get_options()
    # Pretty print the run args
    pp.pprint(vars(opts))

    ############prepare midi file#####################
    midi_file_name = opts.score
    midi_file_path = os.path.join(opts.static_dir, midi_file_name + ".mid")
    f = mido.MidiFile(midi_file_path)
    orig_notes = getNotes(f)
    orig_vecs = getSeqVecs(orig_notes, window=opts.window)
    createCSVFromListOfDict(
        orig_vecs, os.path.join(opts.static_dir, midi_file_name, "info.csv"))

    orig_tick_measure_list = getTickMeasureDict(f)

    # print("tick_measure_list: {}".format(orig_tick_measure_list))

    pygame.init()
    pygame.midi.init()

    # # prints connected midi devices
    # for n in range(pygame.midi.get_count()):
    #     # (interf, name, input, output, opened)
    #     print(n,pygame.midi.get_device_info(n))

    input_id = pygame.midi.get_default_input_id()  # gets the first connected device
    input_device = pygame.midi.Input(input_id)
    print("midi input device connected: {}".format(
        pygame.midi.get_device_info(input_id)))

    run(opts, input_device, orig_vecs, orig_tick_measure_list)

    return
Example #17
 def test_bad_backend(self):
     try:
         params = options.get_options(["--backend=invalid"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #18
File: zeya.py Project: sekjal/zeya
    if bind_address != '':
        print "Binding to address %s" % bind_address

    print "Listening on port %d" % (port,)
    # Start up a web server.
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.server_close()

if __name__ == '__main__':
    try:
        (show_help, backend_type, bitrate, bind_address, port, path, basic_auth_file) = \
            options.get_options(sys.argv[1:])
    except options.BadArgsError, e:
        print e
        options.print_usage()
        sys.exit(1)
    if show_help:
        options.print_usage()
        sys.exit(0)
    print "Using %r backend." % (backend_type,)
    try:
        backend = get_backend(backend_type)
    except IOError, e:
        print e
        sys.exit(1)
    run_server(backend, bind_address, port, bitrate, basic_auth_file)
Example #19
import sqlite3
import sys
import options
import tempfile
import os
import logging

__version__ = "0.0.3"

options = options.get_options(__version__)
app_dir = os.path.dirname(__file__)

logging.basicConfig(level=logging.INFO, format='%(levelname)-10s %(asctime)-24s %(message)s')
logging.info('Started with options: ' + str(options))

def check_export_table(c):
    with open(os.path.join(app_dir, 'schema.sql')) as f:
        sqls = f.read().split(';')

    for sql in sqls:
        c.execute(sql)

def export_to_xml(row_set):
    try:
        from lxml.etree import Element, SubElement, tostring
        format = { 'pretty_print': True, 'xml_declaration': True}
    except ImportError:
        from xml.etree.ElementTree import Element, SubElement, tostring
        format = {}

    doc = Element('history')
Example #20
        elif save in ['n', 'N', 'no']:
            print("Not saving or refreshing the cache.")
        else:
            print("Invalid 'save' option; not saving or refreshing the cache.")
    except (smtplib.SMTPException, TimeoutError) as e:
        print(e)


if __name__ == "__main__":
    # host
    SINA = "*****@*****.**"
    GMAIL = "*****@*****.**"
    QQMAIL = "*****@*****.**"

    os.chdir(sys.path[0])
    CACHE_FILE = "cache/cache-" + today() + ".html"

    # save or refresh cache.
    save, flush = options.get_options(sys.argv)

    # user info
    mail_host = "smtp.sina.cn"
    mail_user = "******"
    mail_pass = "******"
    sender = '*****@*****.**'
    receivers = [
        '*****@*****.**'
    ]  # recipient addresses; can be set to your QQ mailbox or any other mailbox, e.g. '*****@*****.**', '*****@*****.**'

    send_mail(mail_user, mail_pass, receivers)
Example #21
import os
import sys
import signal
from options import get_options
from helper import failover, is_upgrade
from chorus_setup import chorus_set
from health_check import health_check, system_checking
from configure import configure
from log import logger
from color import bold, error
import traceback
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
options, arg, health_args = get_options(sys.argv)
is_upgrade = is_upgrade(options.chorus_path, options.data_path) and not options.force
handler = {"setup":chorus_set.setup, "health_check":health_check, "configure":configure.config}
def exit_gracefully(signum, frame):
    #print "\nSetup aborted, Cancelled by user"
    print
    failover(options.chorus_path, options.data_path, is_upgrade)
    sys.exit(1)

def main():
    try:
        signal.signal(signal.SIGINT, exit_gracefully)

        if (arg == "setup" and not options.disable_spec) \
           or (arg == "health_check" and (health_args == [] or "checkos" in health_args)):
            system_checking()

        if arg == "health_check":
            handler[arg](" ".join(health_args))
Example #22
import os
from collections import deque
import pygame
import cv2  # used by MovieWriter below (VideoWriter / FOURCC)

from environment.environment import Environment
from model.model import UnrealModel
from train.experience import ExperienceFrame
from options import get_options

BLUE = (128, 128, 255)
RED = (255, 192, 192)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)

# get command line args
flags = get_options("display")


class MovieWriter(object):
    def __init__(self, file_name, frame_size, fps):
        """
    frame_size is (w, h)
    """
        self._frame_size = frame_size
        fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
        self.vout = cv2.VideoWriter()
        success = self.vout.open(file_name, fourcc, fps, frame_size, True)
        if not success:
            print("Create movie failed: {0}".format(file_name))

    def add_frame(self, frame):
Example #23
 def test_path(self):
     params = options.get_options(["--path=/foo/bar"])
     self.assertEqual('/foo/bar', params[5])
Example #24
 def test_default(self):
     params = options.get_options([])
     self.assertFalse(params[0])
     self.assertEqual('dir', params[1])
     self.assertEqual(os.path.abspath('.'), params[5])
Example #25
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import matplotlib.pyplot as plt

from environment.environment import Environment
from model.model import UnrealModel
from options import get_options

# get command line args
flags = get_options("visualize")


def main(args):
    action_size = Environment.get_action_size(flags.env_type, flags.env_name)
    objective_size = Environment.get_objective_size(flags.env_type,
                                                    flags.env_name)
    global_network = UnrealModel(action_size, objective_size, -1,
                                 flags.use_lstm, flags.use_pixel_change,
                                 flags.use_value_replay,
                                 flags.use_reward_prediction, 0.0, 0.0,
                                 "/cpu:0")  # use CPU for weight visualize tool

    sess = tf.Session()

    init = tf.global_variables_initializer()
    sess.run(init)
Example #26
 def test_default(self):
     params = options.get_options([])
     self.assertFalse(params[0])
     self.assertEqual('dir', params[1])
     self.assertEqual('.', params[4])
Example #27
#!/usr/bin/python

from transmitter        import Transmitter
from debugtransmitter   import DebugTransmitter
from messagethrottler   import MessageThrottler
from twistedadapter     import TwistedAdapter
import options

from twisted.internet import protocol, reactor
from twisted.application import service, internet

options = options.get_options()

if not options.console:
    Transmitter = Transmitter
else:
    Transmitter = DebugTransmitter


transmitter = Transmitter(
    debug = options.debug
)

# setup factory for twisted
factory = protocol.ServerFactory()
factory.protocol = TwistedAdapter
factory.queue    = MessageThrottler(transmitter)

# run in twisted
reactor.listenTCP(
    int(options.port),
Example #28
        -u, --upsert    if true, update files that have already been indexed. If false, skip.
        -d, --debug     print extremely verbose debugging info to stdout
        -h, --hashing   if true, include an md5 hash in the document that is persisted
"""

import sys

from options import get_options
from settings import collection, connect_string, database

from image_indexer.imageIndexer import ImageIndexer
from image_indexer.imageDAO import ImageDAO


if __name__ == '__main__':
    opt = get_options(sys.argv[1:])

    if opt.debug:
        print "path=%s(%s), verbose=%s(%s), upsert=%s(%s), debug=%s(%s), hashing=%s(%s)" % \
              (opt.path, type(opt.path), opt.verbose, type(opt.verbose),
               opt.upsert, type(opt.upsert), opt.debug, type(opt.debug), opt.hashing,
               type(opt.hashing))

    dao = ImageDAO(connection_string=connect_string, database=database, collection=collection, upsert=opt.upsert)
    indexer = ImageIndexer(opt.path, dao, verbose=opt.verbose, debug=opt.debug, hashing=opt.hashing)
    try:
        indexer.index()
    except KeyboardInterrupt as e:
        print "Aborted. ", e

Example #29
    ]
    return torch.tensor(np.array(new_state)).float().cuda()


def axmodel(action):
    action = action.squeeze().cpu().data.numpy()
    p = (random.random() - 0.5) * 2
    new_action = list(action)
    new_action.append(p)
    # new_action = [p]
    # new_action.extend(list(action))
    return torch.tensor(np.array(new_action)).float().cuda()


if __name__ == '__main__':
    args = get_options()
    # args.env = 'Swimmer-v2'
    args.env = 'HalfCheetah-v2'
    args.pretrain_f = True
    args.data_type1 = 'base'
    args.data_type2 = '3leg'
    args.data_id1 = 0
    args.data_id2 = 0
    args.pair_n = 5000
    args.state_dim1 = 8
    args.action_dim1 = 2
    args.state_dim2 = 10
    args.action_dim2 = 3

    analysis()
    # main(args)
Example #30
 def test_bad_bitrate(self):
     try:
         params = options.get_options(["--bitrate=0"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #31
 def test_get_help(self):
     params = options.get_options(["--help"])
     self.assertTrue(params[0])
Example #32
 def test_bad_port(self):
     try:
         params = options.get_options(["--port=p"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #33
 def test_backend(self):
     params = options.get_options(["--backend=rhythmbox"])
     self.assertEqual('rhythmbox', params[1])
Example #34
from options import get_options

USE_GPU = True  # To use GPU, set True

# get command line args
flags = get_options("training")
Example #35
 def test_bitrate(self):
     params = options.get_options(["-b128"])
     self.assertEqual(128, params[2])
Example #36
 def test_bad_backend(self):
     try:
         params = options.get_options(["--backend=invalid"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #37
 def test_port(self):
     params = options.get_options(["-p9999"])
     self.assertEqual(9999, params[4])
Example #38
 def test_bitrate(self):
     params = options.get_options(["-b128"])
     self.assertEqual(128, params[2])
Example #39
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np

from environment.environment import Environment
from model.model import UnrealModel
from train.experience import ExperienceFrame
from options import get_options
import util

# get command line args
flags = get_options("evaluate")


class Evaluate(object):
    def __init__(self):
        self.action_size = Environment.get_action_size(flags.env_type,
                                                       flags.env_name)
        self.objective_size = Environment.get_objective_size(
            flags.env_type, flags.env_name)
        self.global_network = UnrealModel(self.action_size,
                                          self.objective_size,
                                          -1,
                                          flags.use_lstm,
                                          flags.use_pixel_change,
                                          flags.use_value_replay,
                                          flags.use_reward_prediction,
Example #40
 def test_bad_bitrate(self):
     try:
         params = options.get_options(["--bitrate=0"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #41
    #         except:
    #             ave_loss[key] = value
    #     print(display)
    #
    #     # if (batch_id + 1) % opt.display_gap == 0:
    #     #     path = os.path.join(img_logs, 'imgA_{}.jpg'.format(batch_id + 1))
    #     #     model.visual(path)
    #
    # display ='average loss: '
    # for key, value in ave_loss.items():
    #     display += '{}:{:.4f}  '.format(key, value/len(dataset))
    # print(display)


if __name__ == '__main__':
    opt = get_options()
    opt.lambda_G0 = 1
    opt.lambda_G1 = 1
    opt.lambda_G2 = 1
    opt.lambda_F = 500

    # opt.data_id1 = 1
    # opt.data_id2 = 1
    # opt.pretrain_f = False
    # train(opt)

    # opt.pretrain_f = True
    # opt.norm = True
    # opt.domain_name = 'finger'
    # opt.task_name = 'spin'
    # opt.data_id1 = 3
Example #42
 def test_port(self):
     params = options.get_options(["-p9999"])
     self.assertEqual(9999, params[4])
Example #43
from attention_model import AttentionModel
from problem_tsp import TSP as problem


def maybe_cuda_model(model, cuda, parallel=True):
    if cuda:
        model.cuda()

    if parallel and torch.cuda.device_count() > 1:
        model = DataParallel(model)

    return model


if __name__ == "__main__":
    opts = get_options()

    # Pretty print the run args
    pp.pprint(vars(opts))

    # Set the random seed
    torch.manual_seed(opts.seed)

    # Optionally configure tensorboard
    if not opts.no_tensorboard:
        configure(
            os.path.join(opts.log_dir, "{}_{}".format(problem.NAME,
                                                      opts.graph_size),
                         opts.run_name))

    os.makedirs(opts.save_dir)
Example #44
 def test_bad_port(self):
     try:
         params = options.get_options(["--port=p"])
         self.fail("get_options should have raised BadArgsError")
     except options.BadArgsError:
         pass
Example #45
    # Start the actual training loop
    train_dataset = problem.make_dataset(size=opts.graph_size,
                                         filename=opts.train_dataset)
    opts.epoch_size = train_dataset.size
    val_dataset = problem.make_dataset(size=opts.graph_size,
                                       filename=opts.val_dataset)
    opts.val_size = val_dataset.size

    if opts.resume:
        epoch_resume = int(
            os.path.splitext(os.path.split(opts.resume)[-1])[0].split("-")[1])

        torch.set_rng_state(load_data['rng_state'])
        if opts.use_cuda:
            torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
        # Set the random states
        print("Resuming after {}".format(epoch_resume))
        opts.epoch_start = epoch_resume + 1

    if opts.eval_only:
        validate(model, val_dataset, opts)
    else:
        for epoch in range(opts.epoch_start, opts.epoch_start + opts.n_epochs):
            train_epoch_sl(model, optimizer, lr_scheduler, epoch,
                           train_dataset, val_dataset, problem, tb_logger,
                           opts)


if __name__ == "__main__":
    run(get_options())
Example #46
                raise Exception("bootloader overflow in interface area")
            offset = interface_start
        
        # Copy bootloader
        image.write(bootloader.read())
        
        # Write padding
        for _ in range(offset - bootloader_size):
            image.write(pack('B', 0xFF))
        
        # Copy interface
        image.write(interface.read())


if __name__ == '__main__':
    options = get_options()
    print 'Generating image for (if: %s, target: %s)' % (options.interface, options.target)
    
    bootloader_elf = get_bootloader_path(options.interface)
    print 'Bootloader: %s' % bootloader_elf 
    
    interface_elf = get_interface_path(options.interface, options.target)
    print 'Interface : %s' % interface_elf
    
    image_bin = get_image_path(options.interface, options.target)
    print 'Image     : %s' % image_bin
    
    gen_binary(bootloader_elf, TMP_BOOTLOADER_BIN_PATH, is_lpc(options.interface))
    gen_binary(interface_elf , TMP_INTERFACE_BIN_PATH)
    
    merge(TMP_BOOTLOADER_BIN_PATH, TMP_INTERFACE_BIN_PATH, image_bin,
Example #47
from options import get_options
from datasets import get_dataloader
from model import get_model
from trainer import get_trainer

opt = get_options('train')
train_loader = get_dataloader(opt, 'train')
val_loader = get_dataloader(opt, 'val')
model = get_model(opt)
trainer = get_trainer(opt, model, train_loader, val_loader)

trainer.train()
Example #48
def main_test(_):
    options = get_options()
    evalualte_pose(options)