Example #1
def install(path):
  '''
  If not installed, download the jQuery file to the specified path.
  '''
  if not os.path.exists(path):
    tools.download(DOWNLOAD_URL, tools.path(path))
    print('jquery installed')
Example #2
def install(path):
  '''
  If not installed, download Brucrat.js.
  '''
  if not os.path.exists(path):
    tools.download(DOWNLOAD_URL, tools.path(path))
    print('brucrat installed')
Example #3
def install(path):
  '''
  If not installed, download the latest version of underscore.js to the specified path.
  '''
  if not os.path.exists(path):
    tools.download(DOWNLOAD_URL, tools.path(path))
    print('underscore installed')
Example #4
def install(path):
  '''
  If not installed, download the latest version of Backbone.js to the
  specified path.
  '''
  if not os.path.exists(path):
    tools.download(DOWNLOAD_URL, tools.path(path))
    print('backbone installed')
Example #5
def download():
  '''
  Download the sources as a zip package if they are not cached.
  '''
  cache_path = cache.path(DOWNLOAD_CACHE)
  if not os.path.exists(cache_path):
    tools.download(DOWNLOAD_URL, cache_path)
    print('bootstrap downloaded')
  return cache_path
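Examples 1-5 all call a two-argument tools.download(url, dest) helper that this page never shows. A minimal sketch of what it might look like, assuming a plain HTTP fetch via urllib.request; the chunk size and directory handling are illustrative, not the projects' actual implementation:

import os
import urllib.request

def download(url, dest):
    # Stream the remote file to `dest`, creating parent directories as needed.
    os.makedirs(os.path.dirname(dest) or '.', exist_ok=True)
    with urllib.request.urlopen(url) as response, open(dest, 'wb') as out:
        while True:
            chunk = response.read(64 * 1024)
            if not chunk:
                break
            out.write(chunk)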
Example #6
def post(self):
    if not self.current_user:
        self.redirect("/login.html")
    else:
        filename = self.get_body_argument("filename").encode("ascii")
        servername = self.get_body_argument("servername").encode("ascii")
        cf = ConfigParser.ConfigParser()  # config is read but not used in this excerpt
        cf.read("config.conf")
        tools.download(servername, filename, ".")
        logging.warning("filename: %s, servername: %s" % (filename, servername))
Example #7
def post(self):
    if not self.current_user:
        self.redirect("/login.html")
    else:
        filename = self.get_body_argument("filename").encode("ascii")
        servername = self.get_body_argument("servername").encode("ascii")
        cf = ConfigParser.ConfigParser()
        cf.read("config.conf")
        tools.download(servername, filename)
        logging.warning("filename: %s, servername: %s" %
                        (filename, servername))
        result = {"result": 1, "difference": "text"}
        self.write(result)
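Examples 6 and 7 are Tornado RequestHandler methods; self.current_user only works once get_current_user is defined and the handler is registered with an application. A hedged sketch of that wiring, where the URL pattern, cookie name, and port are assumptions:

import tornado.ioloop
import tornado.web

class DownloadHandler(tornado.web.RequestHandler):
    def get_current_user(self):
        # Tornado calls this hook to populate self.current_user;
        # the cookie name is an assumption.
        return self.get_secure_cookie("user")

    def post(self):
        # The body of Example #7 would go here.
        self.write({"result": 1})

if __name__ == "__main__":
    app = tornado.web.Application(
        [(r"/download", DownloadHandler)],
        cookie_secret="CHANGE_ME",  # required by get_secure_cookie
        login_url="/login.html",
    )
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()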
Example #8
def download_weights(self, cnn=True, rnn=True, ask=False):
    if not ask:
        r = 'Y'
    if cnn:
        if ask and os.path.isfile(self.cnn):
            r = input('CNN Weights file already exists. Redownload? Y/N\n')
        elif ask:
            r = input('CNN weights not found. Download weights (377 MB)? Y/N\n')
        if r.upper() == 'Y':
            os.makedirs('./weights', exist_ok=True)
            tools.download('https://www.dropbox.com/s/otm6t0u2tmbj7sd/cnn.hdf5?dl=1', './weights/cnn.hdf5')

    if rnn:
        if ask and os.path.isfile(self.rnn):
            r = input('RNN Weights file already exists. Redownload? Y/N\n')
        elif ask:
            r = input('RNN Weights not found. Download weights (14 MB)? Y/N\n')
        if r.upper() == 'Y':
            os.makedirs('./weights', exist_ok=True)
            tools.download('https://www.dropbox.com/s/t6n9x9pvt5tlvj8/rnn.hdf5?dl=1', './weights/rnn.hdf5')
Example #9
def download_all_images(urls):
    """Given a list of URLs, download all retrievable images and return two dictionaries.

    :returns:
        passed_images dict<string,string> -- local path -> remote URL mappings
        failed_images dict<string,string> -- remote URL -> failure message mappings
    """
    passed_images = {}
    failed_images = {}
    for url in urls:
        try:
            local_img_loc = tools.download(url)
            passed_images[local_img_loc] = url
        except Exception as ex:
            # covers DownloadException and anything else tools.download raises
            failed_images[url] = str(ex)
    return passed_images, failed_images
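A hypothetical call site for download_all_images; the URLs are placeholders:

urls = [
    'https://example.com/logo.png',
    'https://example.com/missing.png',
]
passed, failed = download_all_images(urls)
for local_path, remote_url in passed.items():
    print('saved %s -> %s' % (remote_url, local_path))
for remote_url, reason in failed.items():
    print('failed %s: %s' % (remote_url, reason))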
Example #10
def install_windows():
    arch_bits = platform.architecture()[0]
    # Step 1: Install downloadable dependencies
    for prog_name in dependencies['download']:
        prog = dependencies['download'][prog_name]
        prog_path = download(prog[arch_bits][0], prog[arch_bits][1])
        if prog_path:
            print("Installing %s..." % prog_name, end=' ')
            call(prog['call'] % prog_path, shell=True)
            print("DONE (unless you cancelled, you fool?)")
        else:
            print("FAILED")

    # Step 2: Install node modules
    here_path = os.path.dirname(__file__)
    node_modules_folder = os.path.join(here_path, 'node_modules')
    if not os.path.exists(node_modules_folder):
        os.makedirs(node_modules_folder)
    for module_name in dependencies['node_modules']:
        print("Installing Node Module: %s..." % module_name)
        call("npm install --prefix %s %s" % (here_path, module_name), shell=True)
        print("...Successfully installed Node Module: %s" % module_name)
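The dependencies mapping that install_windows walks is not shown on this page. A hypothetical shape, inferred from how the function indexes it (platform.architecture()[0] returns strings like '64bit'); every URL, filename, and command below is a placeholder:

dependencies = {
    'download': {
        'node': {
            '32bit': ('https://example.com/node-x86.msi', 'node-x86.msi'),
            '64bit': ('https://example.com/node-x64.msi', 'node-x64.msi'),
            'call': 'msiexec /i %s',  # command template filled with the downloaded path
        },
    },
    'node_modules': ['express', 'socket.io'],
}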
Example #11
def _go(self):
    while True:
        # Block until an item is available.
        pwd, true_url, page_url = video_url_queue.get()
        download(pwd, true_url, page_url)
        video_url_queue.task_done()
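_go assumes a shared video_url_queue that producers fill elsewhere. A minimal sketch of that setup with the standard library, assuming download is the project's helper with signature (pwd, true_url, page_url); the queued tuple is a placeholder:

import queue
import threading

from tools import download  # assumed source of the helper

video_url_queue = queue.Queue()

def worker():
    while True:
        pwd, true_url, page_url = video_url_queue.get()  # blocks until an item arrives
        try:
            download(pwd, true_url, page_url)
        finally:
            video_url_queue.task_done()

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()

video_url_queue.put(('/tmp', 'https://example.com/video.mp4', 'https://example.com/page'))
video_url_queue.join()  # wait until every queued item has been processed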
Example #12
import argparse
from tools import download, trading_daterange

if __name__ == '__main__':
    with open('apikey', 'r') as f:
        api_key = f.readline().strip()

    parser = argparse.ArgumentParser()
    parser.add_argument('--start')
    parser.add_argument('--end')
    parser.add_argument('--filepath')
    args = parser.parse_args()

    # Dates are passed as command-line arguments in YYYY-MM-DD format.
    start_date = args.start
    if start_date is None:
        raise ValueError('--start must be provided.')

    end_date = args.end if args.end else start_date
    filepath = args.filepath if args.filepath else './'
    filepath = filepath if filepath.endswith('/') else filepath + '/'

    for day in trading_daterange(start_date, end_date):
        download(date=day, api='xetra', api_key=api_key, dirpath=filepath)
        # download(date=day, api='eurex', api_key=api_key, dirpath=filepath)
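trading_daterange is imported from tools but never shown. A hypothetical reimplementation that yields ISO date strings for the weekdays between the two dates; the real version may also skip exchange holidays:

from datetime import date, timedelta

def trading_daterange(start, end):
    # Yield 'YYYY-MM-DD' strings for every weekday from start to end inclusive.
    day = date.fromisoformat(start)
    last = date.fromisoformat(end)
    while day <= last:
        if day.weekday() < 5:  # Monday..Friday
            yield day.isoformat()
        day += timedelta(days=1)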
Example #13
def handler(context):
    print(
        f'start training with parameters : {Parameters.as_dict()}, context : {context}'
    )

    try:
        dataset_alias = context.datasets  # for older version
    except AttributeError:
        dataset_alias = context['datasets']

    train_dataset_id, val_dataset_id = get_dataset_ids(dataset_alias)

    id2index, _ = set_categories(list(dataset_alias.values()))
    num_classes = len(id2index)
    num_classes += 1  # add one for the background class

    print(f'number of classes : {num_classes}')

    print("Start downloading datasets.")
    dataset_items = list(
        load_dataset_from_api(train_dataset_id, max_num=Parameters.MAX_ITEMS))
    print("Finish downloading datasets.")

    random.shuffle(dataset_items)
    if val_dataset_id is not None:
        val_dataset_items = list(
            load_dataset_from_api(val_dataset_id,
                                  max_num=Parameters.MAX_ITEMS))
        random.shuffle(val_dataset_items)
        train_dataset_items = dataset_items
    else:
        test_size = int(len(dataset_items) * Parameters.TEST_SIZE)
        train_dataset_items, val_dataset_items = dataset_items[
            test_size:], dataset_items[:test_size]

    train_dataset = ABEJAPlatformDataset(train_dataset_items,
                                         phase="train",
                                         transform=DataTransform(
                                             Parameters.IMG_SIZE,
                                             Parameters.MEANS))

    val_dataset = ABEJAPlatformDataset(val_dataset_items,
                                       phase="val",
                                       transform=DataTransform(
                                           Parameters.IMG_SIZE,
                                           Parameters.MEANS))

    print(f'train dataset : {len(train_dataset)}')
    print(f'val dataset : {len(val_dataset)}')

    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=Parameters.BATCH_SIZE,
                                       shuffle=Parameters.SHUFFLE,
                                       collate_fn=od_collate_fn)

    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=Parameters.BATCH_SIZE,
                                     shuffle=False,
                                     collate_fn=od_collate_fn)

    dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
    print(f'data loaders : {dataloaders_dict}')

    ssd_cfg = {
        'num_classes': num_classes,  # number of classes including background class
        'input_size': Parameters.IMG_SIZE,
        'bbox_aspect_num': Parameters.BBOX_ASPECT_NUM,
        'feature_maps': Parameters.FEATURE_MAPS,
        'steps': Parameters.STEPS,
        'min_sizes': Parameters.MIN_SIZES,
        'max_sizes': Parameters.MAX_SIZES,
        'aspect_ratios': Parameters.ASPECT_RATIOS,
        'conf_thresh': Parameters.CONF_THRESHOLD,
        'top_k': Parameters.TOP_K,
        'nms_thresh': Parameters.NMS_THRESHOLD
    }
    net = SSD(phase="train", cfg=ssd_cfg)

    # TODO: better to host this file ourselves
    # https://github.com/amdegroot/ssd.pytorch#training-ssd
    url = 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth'
    weight_file = os.path.join(Parameters.ABEJA_TRAINING_RESULT_DIR,
                               'vgg16_reducedfc.pth')
    download(url, weight_file)

    vgg_weights = torch.load(weight_file)
    print('finish loading base network...')
    net.vgg.load_state_dict(vgg_weights)

    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight.data)
            if m.bias is not None:  # only if the layer has a bias term
                nn.init.constant_(m.bias, 0.0)

    # apply He (Kaiming) initialization
    net.extras.apply(weights_init)
    net.loc.apply(weights_init)
    net.conf.apply(weights_init)

    # configure loss function
    criterion = MultiBoxLoss(jaccard_thresh=Parameters.OVERLAP_THRESHOLD,
                             neg_pos=Parameters.NEG_POS,
                             device=device)

    # configure optimizer
    optimizer = optim.SGD(net.parameters(),
                          lr=Parameters.LR,
                          momentum=Parameters.MOMENTUM,
                          dampening=Parameters.DAMPENING,
                          weight_decay=Parameters.WEIGHT_DECAY,
                          nesterov=Parameters.NESTEROV)

    # move network to device
    net.to(device)

    # NOTE: This flag enables the built-in cuDNN auto-tuner,
    # which finds the best convolution algorithm for the hardware in use.
    # cf. https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/2
    torch.backends.cudnn.benchmark = True

    iteration = 1
    epoch_train_loss = 0.0
    epoch_val_loss = 0.0
    latest_epoch_train_loss = epoch_train_loss
    latest_epoch_val_loss = epoch_val_loss

    for epoch in range(Parameters.EPOCHS):

        t_epoch_start = time.time()
        t_iter_start = time.time()

        print('-------------')
        print('Epoch {}/{}'.format(epoch + 1, Parameters.EPOCHS))
        print('-------------')

        # train and validation loop for each epoch
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()
                print('(train)')
            else:
                if (epoch + 1) % 10 == 0:
                    net.eval()
                    print('-------------')
                    print('(val)')
                else:
                    # run validation only once every ten epochs
                    continue

            # iterate over mini-batches from the data loader
            for images, targets in dataloaders_dict[phase]:

                images = images.to(device)
                targets = [ann.to(device) for ann in targets]

                # zero accumulated gradients
                optimizer.zero_grad()

                # forward pass
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = net(images)

                    # calculate loss
                    loss_l, loss_c = criterion(outputs, targets)
                    loss = loss_l + loss_c

                    if phase == 'train':
                        # backpropagate when training
                        loss.backward()  # compute gradients

                        nn.utils.clip_grad_value_(
                            net.parameters(), clip_value=Parameters.CLIP_VALUE)

                        optimizer.step()  # update parameters

                        if iteration % 10 == 0:  # display loss once every ten iterations
                            t_iter_finish = time.time()
                            duration = t_iter_finish - t_iter_start
                            print(
                                'iter {} || Loss: {:.4f} || 10iter: {:.4f} sec.'
                                .format(iteration, loss.item(), duration))
                            t_iter_start = time.time()

                        epoch_train_loss += loss.item()
                        iteration += 1

                    else:
                        epoch_val_loss += loss.item()

        # end-of-epoch timing and loss bookkeeping
        t_epoch_finish = time.time()

        # keep latest epoch loss
        if epoch_train_loss != 0.0:
            num_total = len(dataloaders_dict['train'])
            latest_epoch_train_loss = epoch_train_loss / num_total
        if epoch_val_loss != 0.0:
            num_total = len(dataloaders_dict['val'])
            latest_epoch_val_loss = epoch_val_loss / num_total

        print('-------------')
        print('epoch {} || Epoch_TRAIN_Loss:{:.4f} || Epoch_VAL_Loss:{:.4f}'.
              format(epoch + 1, latest_epoch_train_loss,
                     latest_epoch_val_loss))
        print('timer:  {:.4f} sec.'.format(t_epoch_finish - t_epoch_start))
        t_epoch_start = time.time()

        statistics(epoch + 1, latest_epoch_train_loss, None,
                   latest_epoch_val_loss, None)

        writer.add_scalar('main/loss', latest_epoch_train_loss, epoch + 1)
        if (epoch + 1) % 10 == 0:
            writer.add_scalar('test/loss', latest_epoch_val_loss, epoch + 1)

            model_path = os.path.join(Parameters.ABEJA_TRAINING_RESULT_DIR,
                                      f'ssd300_{str(epoch + 1)}.pth')
            torch.save(net.state_dict(), model_path)

        writer.flush()
        epoch_train_loss = 0.0
        epoch_val_loss = 0.0

    torch.save(net.state_dict(),
               os.path.join(Parameters.ABEJA_TRAINING_RESULT_DIR, 'model.pth'))
    writer.close()
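The od_collate_fn passed to both DataLoaders is not shown. In SSD-style object detection the images share a fixed size and can be stacked, while each image carries a different number of ground-truth boxes, so a typical collate function keeps the annotations in a list; a sketch of that pattern, not necessarily this project's exact code:

import torch

def od_collate_fn(batch):
    # batch is a list of (image, annotations) pairs from the dataset.
    images = torch.stack([sample[0] for sample in batch], dim=0)
    # Keep per-image [num_boxes, 5] tensors (x1, y1, x2, y2, label) in a list,
    # since box counts differ between images and cannot be stacked.
    targets = [torch.as_tensor(sample[1], dtype=torch.float32) for sample in batch]
    return images, targets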
Example #14
from população import População
from individuo import Individuo
import tools
import os
from datetime import datetime
from multiprocessing import cpu_count
from multiprocessing import freeze_support

__author__ = '@arthurj'

entrada_crua = tools.download("1W0J-KlqjmaIwP5z5eqTos9fqBgjfg9KGnl5AwePIxdk",
                              "TFinal")  #tools.ler('caso_fup.csv')
entrada = tools.processar_entrada(entrada_crua)
res = None
somos = set()
maxi = 100
count = 0
while len(somos) < 5 * cpu_count() + 1:
    count += 1
    if count > 1000:
        print("[WARNING!] Hit the limit of attempts to create somos")
        break
    try:
        s = Individuo(*entrada)
        somos.add(s)
    except Exception as e:
        tools.should_raise(e)

print('Number of initial somos:', len(somos), '\n' + '.' * 60)

Example #15
def download_weights(self):
    os.makedirs('./weights', exist_ok=True)
    tools.download('https://www.dropbox.com/s/otm6t0u2tmbj7sd/cnn.hdf5?dl=1', './weights/cnn.hdf5')
    tools.download('https://www.dropbox.com/s/t6n9x9pvt5tlvj8/rnn.hdf5?dl=1', './weights/rnn.hdf5')