Example No. 1
def main(argv):
    opts, args = getopt.getopt(argv, "m:n:rd:p:t:")
    dump_type = device.DUMP_ALL
    target = ''
    profile = log_pattern.PROFILE_ALL
    logtype = log_pattern.TYPE_ALL
    for op, value in opts:
        if op == "-d":
            device.inputdir = value
        elif op == "-m":
            target = value
            dump_type = device.DUMP_BY_MAC
        elif op == "-n":
            target = value
            dump_type = device.DUMP_BY_NAME
        elif op == "-r":
            device.force_parsing = True
        elif op == "-p":
            profile = value
        elif op == "-t":
            logtype = value

    device.get_device(dump_type, target, target_device_set)

    for a, b in target_device_set.items():
        if debug: print("{0} - {1}".format(a, b))

    parsing(profile, logtype)
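
For reference, a hypothetical direct call to this entry point; the option values below are illustrative and not taken from the original project:

# Illustrative only: dump logs for one device by MAC address, forcing a re-parse.
main(["-d", "./logs", "-m", "00:11:22:33:44:55", "-r"])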
Example No. 2
 def get(self):
     util.save_url(self.request, self.response)
     util.fill_app_attrs(self.app, self.request.uri)
     user_agent = self.request.headers['User-Agent']
     view = self.view.type
     self.view.device = device.get_device(user_agent, view)
     self.response.out.write(util.render_template(self.app, self.view))
Example No. 3
def compile(source, nvcc='nvcc', options=[], keep=False,
            no_extern_c=False, arch=None, code=None, cache_dir=None,
            include_dirs=[]):

    if not no_extern_c:
        source = 'extern "C" {\n%s\n}\n' % source

    options = options[:]
    if arch is None:
        try:
            import device
            arch = "sm_%d%d" % device.get_device().compute_capability()
        except RuntimeError:
            pass

    if cache_dir is None:
        cache_dir = os.path.join(tempfile.gettempdir(), 
                "svxx-cuda-compiler-cache-v1-%s" % _get_per_user_string())

        from os import mkdir
        try:
            mkdir(cache_dir)
        except OSError as e:
            from errno import EEXIST
            if e.errno != EEXIST:
                raise
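
A hypothetical call to this helper; the kernel source and options are illustrative, and since the excerpt is truncated before the function returns, nothing is done with the result here:

kernel_source = '''
__global__ void scale(float *x, float a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    x[i] *= a;
}
'''

compile(kernel_source, options=["-O3"])  # arch is auto-detected from the current device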
Example No. 4
    def fill_view_attrs(self, post_id):
        user_agent = self.request.headers['User-Agent']
        view = self.view.type
        self.view.device = device.get_device(user_agent, view)

        client = BloggerPostClient()
        settings = util.get_settings()
        entry = client.get_one_post(settings.get('blog_id'), post_id)
        if entry:
            self.view.title = entry.title.text
            self.view.permalink = entry.get_html_link().href

            labels = []
            for label in entry.category:
                labels.append(label.term)
            labels.sort()
            self.view.labels = labels
            util.save_labels(labels, self.request, self.response)

            self.view.published = util.get_datetime_from_iso8601(entry.published.text[:19])
            if entry.updated:
                self.view.isUpdated = True
                self.view.updated =  util.get_datetime_from_iso8601(entry.updated.text[:19])
            self.view.content = util.replace_permalinks(entry.content.text)
        else:
            logging.error("Fail to get Atom feed for the post, #%d" % post_id)
Example No. 5
def main():
    args = parse_args()
    dev = device.get_device(args.serial)
    if not check_env(dev):
        print "You must have root permission"
        sys.exit()
    kill_app(dev, get_be_killed_apps(dev, args.k))
Example No. 6
 def _check_arch(self, arch):
     if arch is None: return
     try:
         import device
         capability = device.get_device().compute_capability()
         if tuple(map(int, tuple(arch.split("_")[1]))) > capability:
             from warnings import warn
             warn("trying to compile for a compute capability "
                     "higher than selected GPU")
     except Exception:
         pass
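
To make the comparison above concrete, with illustrative values: for arch = "sm_35" the parsed tuple is (3, 5), and a device reporting capability (3, 0) triggers the warning:

# Illustrative values only.
arch = "sm_35"
capability = (3, 0)
parsed = tuple(map(int, tuple(arch.split("_")[1])))  # -> (3, 5)
print(parsed > capability)  # True, so the warning would be issued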
Example No. 7
    def fill_view_attrs(self):
        user_agent = self.request.headers['User-Agent']
        view = self.view.type
        self.view.device = device.get_device(user_agent, view)

        self.view.content.blog_id = ''
        user = users.get_current_user()
        if user:
            user_settings = model.get_user_settings(user.user_id())
            if user_settings.blog_id:
                self.view.content.blog_id = user_settings.blog_id
            if user_settings.mod_time:
                self.view.mod_time = user_settings.mod_time.strftime("%Y-%m-%d %H:%M:%S")
        else:
            logging.error("This feature cannot perform without user login.")
Example No. 8
    def get(self):
        util.save_url(self.request, self.response)
        util.fill_app_attrs(self.app, self.request.uri)
        user_agent = self.request.headers['User-Agent']
        view = self.view.type
        self.view.device = device.get_device(user_agent, view)

        status_code = 400
        self.response.set_status(status_code)

        request_pattern = re.compile('https?://[^/]+(/.*)')
        params = request_pattern.match(self.request.uri).group(1)

        self.view.status_code = status_code
        self.view.message = webapp.Response.http_status_message(status_code)
        self.view.content = params

        self.response.out.write(util.render_template(self.app, self.view))
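
For illustration, the pattern above keeps everything after the host, including the query string (the URL is hypothetical):

# Illustrative only.
import re
request_pattern = re.compile('https?://[^/]+(/.*)')
print(request_pattern.match('http://example.com/posts/42?draft=1').group(1))
# -> /posts/42?draft=1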
Example No. 9
def main(unused_argv):
    """
    """
    config = get_config()
    logger = get_logger()
    device = get_device()

    input_images = tf.placeholder(
        tf.float32,
        shape=[None, None, None, 3],
        name="input_images"
    )

    logger.debug("input_images: {}".format(input_images))

    for i, device_id in enumerate(device.device_list):
        device_name = device.make_device_name(device_id)

        logger.debug("device_name: {}".format(device_name))

    init_op = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init_op)

        coord = tf.train.Coordinator()

        enqueue_threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for step in range(FLAGS.max_steps):
            logger.debug("step: {:06d}".format(step))

            if step % FLAGS.save_checkpoint_steps == 0:
                pass

            if step % FLAGS.save_summary_steps == 0:
                pass

        coord.request_stop()

        coord.join(enqueue_threads)
Example No. 10
def schedule(recordings):
    # get current time in UTC
    ctime = int(time.time())
    all_recordings = recordings
    # create a new list of recordings based on the status
    recordings = [ r for r in recordings if r.status in \
                   (CONFLICT, SCHEDULED, RECORDING) ]
    # new dict for schedule information. Each entry is r.status,
    # r.device, r.respect_start_padding, r.respect_stop_padding
    schedule = {}
    # sort by start time
    recordings.sort(key=lambda r: r.start)
    for r in recordings[:]:
        # check recordings we missed (stop passed or start over 10
        # minutes ago), remember that in status and remove this
        # recording from the list.
        if r.stop < ctime or (r.start + 600 < ctime and r.status != RECORDING):
            schedule[r.id] = [ MISSED, None, True, True ]
            recordings.remove(r)
        elif r.status == RECORDING:
            # mark current running recordings
            schedule[r.id] = [ r.status, r.device, r.respect_start_padding, \
                               r.respect_stop_padding ]
        else:
            device = get_device(r.channel)
            if device:
                # set to the best device for each recording
                schedule[r.id] = [ SCHEDULED, device, True, True ]
            else:
                # no device found, remove from the list
                schedule[r.id] = [ CONFLICT, None, True, True ]
                recordings.remove(r)

    # recordings is a list of currently running or future recordings
    # detect possible conflicts (delayed to avoid blocking the main loop)
    schedule = yield conflict.resolve(recordings, schedule)
    for r in all_recordings:
        if r.id in schedule:
            r.status, r.device, r.respect_start_padding, \
                      r.respect_stop_padding = schedule[r.id]
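
A sketch of what the resulting schedule mapping can look like; the recording ids and device names are illustrative, and each entry follows the [status, device, respect_start_padding, respect_stop_padding] layout described in the comments above:

# Illustrative only.
schedule = {
    101: [MISSED,    None,   True, True],  # stop time (or start + 10 min) already passed
    102: [RECORDING, 'dvb0', True, True],  # currently recording, keeps its device
    103: [SCHEDULED, 'dvb1', True, True],  # best available device assigned
    104: [CONFLICT,  None,   True, True],  # no free device found
}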
Example No. 11
"""

import torch

# from torchvision.models import vgg19

# vgg = vgg19(pretrained=True)
# features_conv = vgg.features[:36]
# vgg.features

############ DATA & TRANSFORMS

from data import get_data
from device import get_device

device = get_device(force_cpu=False)
train_loader, test_loader = get_data(device, batch_size=64, data='cifar10')

##################### MODEL

from model import NetCifar2
from torchsummary import summary
model = NetCifar2().to(device)

summary(model, input_size=(3, 32, 32))

##################### RUN MODEL

from run import run_model

epochs = 20
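
The device module itself is not shown in these excerpts; a minimal sketch of what a get_device(force_cpu=...) helper along these lines might look like (an assumption, not the original implementation):

import torch

def get_device(force_cpu=False):
    # Fall back to the CPU when CUDA is unavailable or explicitly disabled.
    if force_cpu or not torch.cuda.is_available():
        return torch.device("cpu")
    return torch.device("cuda")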
Example No. 12
 def set_device(self):
     user_agent = self.request.headers['User-Agent']
     view = self.view.type
     self.view.device = device.get_device(user_agent, view)
Example No. 13
 def setUp(self):
     self.device = get_device()
Example No. 14
        ...
        [d d d ... c c c]
        [d d d ... c c c]

    Embed:
        Use pretrained value -> embed models.
        1. d -> one_hot         d -> [0 ... 0 1 0 0 ... 0]
        2. one_hot -> vec       [0 ... 0 1 0 0 ... 0] -> [v1, v2, v3, ... vn]

    Training.

    Generator: -> [vec, vec, vec ... c c c]

'''

device = get_device()

# Pin to GPU 0 and select CUDA explicitly; this overrides the get_device() result above.
torch.cuda.set_device(0)
device = torch.device("cuda")

data_cat, data_cont, max_length = load_data()

padded_data_cat: torch.Tensor = \
    torch.nn.utils.rnn.pad_sequence(data_cat, batch_first=True).split(split_size=PADDING, dim=1)[0]
padded_data_cont: torch.Tensor = \
    torch.nn.utils.rnn.pad_sequence(data_cont, batch_first=True).split(split_size=PADDING, dim=1)[0]

data_train = DataLoader(TensorDataset(padded_data_cat, padded_data_cont),
                        batch_size=BATCH_SIZE,
                        shuffle=True)
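
A toy illustration of the pad-then-truncate step above (the sequence lengths and PADDING value are made up):

# Illustrative only.
import torch

PADDING = 4
seqs = [torch.ones(2, dtype=torch.long), torch.ones(6, dtype=torch.long)]
padded = torch.nn.utils.rnn.pad_sequence(seqs, batch_first=True)  # shape (2, 6)
truncated = padded.split(split_size=PADDING, dim=1)[0]            # shape (2, 4)
print(truncated.shape)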