Example 1
def get_badge(handle, website):
    logo_param = request.args.get('logo')
    display_logo = logo_param is not None and logo_param.lower() == 'true'
    logo = logos[website]
    link = request.args.get('link')
    # A usable link must be longer than 4 characters; None falls through.
    display_link = link is not None and len(link) > 4
    info = get_info(handle, website)
    rating, color = str(info[0]), str(info[1])
    text = website_text[website.lower()]

    badge_args = dict(left_text=text, right_text=rating, right_color=color)
    if display_logo:
        badge_args.update(logo=logo, embed_logo=True)
    if display_link:
        badge_args['left_link'] = link
    badge = pybadges.badge(**badge_args)
    response = flask.make_response(badge)
    response.content_type = 'image/svg+xml'
    return response
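For reference, pybadges.badge simply returns the badge's SVG markup as a string; the view above wraps it in a Flask response with the image/svg+xml content type. A standalone call looks like this (the rating and color are made-up values, not real get_info output):

import pybadges

# Illustrative values only; the real view fills these from get_info().
svg = pybadges.badge(left_text='Codeforces', right_text='1432',
                     right_color='green', left_link='https://codeforces.com')
print(svg[:38])  # the string starts with the usual '<svg ...' markup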
Example 2
    def composeBlueprint(self, blueprintid):
        blueprint = self.findBlueprint(blueprintid)
        if blueprint is None:
            return None

        info = data.get_info(data.blueprint, blueprint.baseid)

        if self.copper < info['copper']:
            return None

        # First pass: verify every required material is in stock, so that
        # nothing is spent on a recipe that cannot be completed.
        for i in range(1, 7):
            mid = wl.tonumber(info["mid" + str(i)])
            if mid == 0:
                continue
            material = self.findMaterial(mid)
            if material.num < wl.tonumber(info["mnum" + str(i)]):
                return None

        self.copper -= info['copper']
        self.save()

        # Second pass: deduct the materials now that the recipe is known
        # to be affordable.
        for i in range(1, 7):
            mid = wl.tonumber(info["mid" + str(i)])
            if mid == 0:
                continue
            material = self.findMaterial(mid)
            material.num -= wl.tonumber(info["mnum" + str(i)])
            material.save()

        return self.addEquip(info['equipid'])
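The method above is a check-then-commit pattern: it validates the copper cost and all six material slots before deducting anything, so a failed compose leaves the inventory untouched. A minimal sketch of the same pattern with plain dicts standing in for the wl/data helpers and model rows:

def compose(recipe, stock, copper):
    # First pass: verify affordability; nothing is spent on failure.
    if copper < recipe['copper']:
        return None
    for mid, need in recipe['materials'].items():
        if stock.get(mid, 0) < need:
            return None
    # Second pass: commit the deductions.
    copper -= recipe['copper']
    for mid, need in recipe['materials'].items():
        stock[mid] -= need
    return recipe['equipid'], copper, stock

print(compose({'copper': 5, 'materials': {1: 2}, 'equipid': 9},
              {1: 3}, copper=10))  # -> (9, 5, {1: 1})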
Example 3
def main_func():
    # Run the daily refresh; IndexError covers the very first call, when
    # update_date is still empty.
    try:
        if datetime.datetime.now().day != update_date[-1]:
            update_date.append(datetime.datetime.now().day)
            update_all()
    except IndexError:
        update_date.append(datetime.datetime.now().day)
        update_all()
    finish = False
    name = request.form.get("table", False)
    if name and name not in new_table:
        new_table[0] = name
    user = request.form.get("user", False)
    if user and user not in new_table:
        new_table[1].append(user)
    if not user:
        finish = True  # an empty user field signals the end of input
    if not new_table[0] or len(new_table[1]) < 1:
        finish = False  # but only commit once a name and users exist
    if finish:
        database_insert(new_table[0], get_info(new_table[1],
                                               len(new_table[1])))
        for u in reversed(range(len(new_table[1]) - 1)):
            update(new_table[0], u)
        new_table[0:2] = [0, []]  # reset to [pending name, user list]
    data_dict = analyze_all()
    return render_template("index.html", data=data_dict, id=1)
Example 4
async def info_msg(message):
    if message.content.lower().startswith('bb info'):
        if len(message.content.split()) < 3:
            return await message.channel.send(
                'Incorrect format (example: bb info Ulgrim)')
        name = message.content[8:].lower()  # drop the "bb info " prefix
        return await message.channel.send(get_info(name))
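A handler like this is normally driven from discord.py's on_message event; a minimal wiring sketch (TOKEN is a placeholder, and info_msg/get_info are assumed from the snippet above):

import discord

intents = discord.Intents.default()
intents.message_content = True  # required to read message bodies
client = discord.Client(intents=intents)

@client.event
async def on_message(message):
    if message.author == client.user:
        return  # ignore the bot's own messages
    await info_msg(message)

# client.run(TOKEN)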
Example 5
def show_info():
    """Return user info as a JSON response with permissive CORS.

    :return: (flask.Response, int) JSON body plus HTTP 200
    """
    info = [item.serialize for item in get_info()]
    res = make_response(jsonify(info))
    res.headers['Access-Control-Allow-Origin'] = '*'
    return res, 200
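A self-contained sketch of the same endpoint under test, with a stub model standing in for whatever get_info really queries (all names besides the Flask API are placeholders):

from flask import Flask, jsonify, make_response

app = Flask(__name__)

class _Item:
    serialize = {'name': 'alice'}  # stand-in for the real model property

def get_info():
    return [_Item()]

@app.route('/info')
def show_info():
    info = [item.serialize for item in get_info()]
    res = make_response(jsonify(info))
    res.headers['Access-Control-Allow-Origin'] = '*'
    return res, 200

with app.test_client() as client:
    print(client.get('/info').get_json())  # -> [{'name': 'alice'}]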
Example 6
    def addBlueprint(self, blueprintid):
        blueinfo = data.get_info(data.blueprint, blueprintid)
        if blueinfo is not None:
            blueprint = self.findBlueprint(blueprintid)
            if blueprint is not None:
                return blueprint  # already owned; do not duplicate
            blueprint = self.blueprint_set.create(baseid=blueprintid)
            blueprint.save()
            return blueprint
        return None
Example 7
    def addMaterial(self, materialid, num):
        info = data.get_info(data.material, materialid)
        if info is not None:
            material = self.findMaterial(materialid)
            if material is None:
                material = self.material_set.create(baseid=materialid)

            material.num += num
            material.save()
            return material
        return None
Example 8
File: child.py Project: ND-SCL/NAQS
    def __init__(self, arch_paras=None, quan_paras=None, dataset='CIFAR10'):
        self.input_shape, self.num_classes = data.get_info(dataset)
        self.dataset = dataset
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        if arch_paras is not None:
            self.model = get_model(self.input_shape, arch_paras,
                                   self.num_classes, self.device)
            self.optimizer = get_optimizer(self.model, 'SGD')
        if quan_paras is not None:
            self.quan_paras = quan_paras
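Across the NAQS examples, data.get_info(dataset) maps a dataset name to an (input_shape, num_classes) pair; the comments later in this page show CIFAR10 as (3, 32, 32) with the batch dimension added separately. A stand-in with that contract:

def get_info(dataset):
    # Shapes are (channels, height, width); CIFAR10 has 10 classes.
    # The MNIST entry is my own illustrative addition.
    shapes = {'CIFAR10': ((3, 32, 32), 10),
              'MNIST': ((1, 28, 28), 10)}
    return shapes[dataset]

input_shape, num_classes = get_info('CIFAR10')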
Example 9
    def addEquip(self, equipid):
        equipbase = data.get_info(data.equipmentbase, equipid)
        if equipbase is not None:
            equip = self.equipment_set.create(baseid=equipid)
            if not self.isFullEquip():
                equip.save()
            else:
                # Inventory full: deliver the item through the mail system
                # instead of the equipment bag.
                mail.sys_send(data.lang['MAIL_TITLE_EQUIP'],
                              data.lang['MAIL_CONTENT_EQUIP'],
                              "1,addEquip,%d;" % equipid, self.userid)

            return equip
        return None
Example 10
    def addSoul(self, soulid):
        soulbase = data.get_info(data.soulbase, soulid)
        if soulbase is not None:
            soul = self.soul_set.create(baseid=soulid)
            self.meet(soulid)
            if not self.isFullSoul():
                soul.save()
            else:
                mail.sys_send(data.lang['MAIL_TITLE_SOUL'],
                              data.lang['MAIL_CONTENT_SOUL'],
                              "1,addSoul,%d;" % soulid, self.userid)
            return soul
        return None
Example 11
def get_roll_button():
    if roll_entry.get().isnumeric():
        r = int(roll_entry.get())
        roll.set(r)
    else:
        r = 0

    roll_list = data.roll_list

    if r in roll_list:
        data_list = data.get_info(r, group.get(), date.get())

        for i in range(len(data_list) - 4):
            label_list[i].config(text=data_list[i])

        # Blank out missing (NaN) spreadsheet cells before displaying them.
        mo = data_list[8]
        if str(mo) == "nan":
            mo = ""
        vm = data_list[9]
        if str(vm) == "nan":
            vm = ""
        tm = data_list[10]
        if str(tm) == "nan":
            tm = "0"
        vr = data_list[11]
        if str(vr) == "nan":
            vr = ""

        label_list[-1].config(text=tm)
        # StringVar.set() returns None, so it must not be passed to config();
        # set the variable first, then bind it to the entry.
        mark_obtained.set(mo)
        mark_obtained_entry.config(textvariable=mark_obtained)
        viva_marks.set(vm)
        viva_entry.config(textvariable=viva_marks)
        remark_text.delete("1.0", "end")
        remark_text.insert(INSERT, vr)

        mark_obtained_entry.focus()

    else:

        for i in range(len(label_list)):
            label_list[i].config(text="N/A")

        string_variable.set("")
        mark_obtained_entry.config(textvariable=string_variable)
        viva_marks.set("")
        viva_entry.config(textvariable=viva_marks)
        remark_text.delete("1.0", "end")

        messagebox.showinfo("Invalid Roll", "Please Enter A Valid Roll!")
        roll_entry.focus()
Example 12
def main_func():
    finish = False
    name = request.form.get("table", False)
    if name not in new_table and name:
        new_table[0] = name
    user = request.form.get("user", False)
    if user not in new_table and user:
        new_table[1].append(user)
    if not user:
        finish = True
    if not new_table[0] or len(new_table[1]) < 1:
        finish = False
    if finish:
        database_insert(new_table[0], get_info(new_table[1], 1))
        new_table[0:2] = [0, []]  # reset to [pending name, user list]
    data_dict = analyze_all()
    return render_template("index.html", data=data_dict)
Example 13
def process_message():
    if "context" in request.form:
        response = assistant.message(
            workspace_id=os.environ['IBM_WORKSPACE_ID'],
            input={'text': request.form['text']},
            context=json.loads(request.form['context']))
    else:
        response = assistant.message(
            workspace_id=os.environ['IBM_WORKSPACE_ID'], input={'text': ""})

    retval = {}
    # Replies whose last line starts with the "%%" sentinel are data lookups;
    # everything else is a follow-up question that keeps the dialog context.
    if response["output"]["text"][-1][:2] == "%%":
        retval["type"] = "info"
        retval["text"] = get_info(response["output"]["text"])

    else:
        retval["type"] = "question"
        retval["text"] = response["output"]["text"]
        retval["context"] = response["context"]

    return jsonify(retval)
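An illustration of the "%%" sentinel branch with a hand-built response dict (the real object comes back from the Watson Assistant SDK; only the keys used above are modeled):

response = {"output": {"text": ["One moment", "%%lookup=weather"]},
            "context": {"conversation_id": "abc123"}}

if response["output"]["text"][-1][:2] == "%%":
    print("info request:", response["output"]["text"])
else:
    print("question:", response["output"]["text"])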
Example 14
        x = self.fc(x)
        return x


if __name__ == '__main__':
    import data
    import backend
    import time

    dataset = 'CIFAR10'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_data, val_data = data.get_data(dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=128)
    input_shape, num_classes = data.get_info(dataset)
    model = SimpleNet().to(device)
    if device.type == 'cuda':
        print("using parallel data")
        model = torch.nn.DataParallel(model)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=1e-4,
                          nesterov=True)
    # optimizer = optim.Adam(
    #     model.parameters(),
    #     lr=0.001,
    #     betas=(0.9, 0.999),
    #     eps=1e-8,
    #     weight_decay=0.0,
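Example 15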
def sync_search(device, dir='experiment'):
    dir = os.path.join(
        dir,
        utility.cleanText(f"rLut-{args.rLUT}_rThroughput-{args.rThroughput}"))
    if os.path.exists(dir) is False:
        os.makedirs(dir)
    filepath = os.path.join(
        dir, utility.cleanText(f"joint_{args.episodes}-episodes"))
    logger = utility.get_logger(filepath)
    csvfile = open(filepath + '.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    tb_writer = SummaryWriter(filepath)

    logger.info(f"INFORMATION")
    logger.info(f"mode: \t\t\t\t\t {'joint'}")
    logger.info(f"dataset: \t\t\t\t {args.dataset}")
    logger.info(f"number of child network layers: \t {args.layers}")
    logger.info(f"seed: \t\t\t\t {args.seed}")
    logger.info(f"gpu: \t\t\t\t {args.gpu}")
    logger.info(f"include batchnorm: \t\t\t {args.batchnorm}")
    logger.info(f"include stride: \t\t\t {not args.no_stride}")
    logger.info(f"include pooling: \t\t\t {not args.no_pooling}")
    logger.info(f"skip connection: \t\t\t {args.skip}")
    logger.info(f"required # LUTs: \t\t\t {args.rLUT}")
    logger.info(f"required throughput: \t\t\t {args.rThroughput}")
    logger.info(f"Assumed frequency: \t\t\t {CLOCK_FREQUENCY}")
    logger.info(f"training epochs: \t\t\t {args.epochs}")
    logger.info(f"data augmentation: \t\t\t {args.augment}")
    logger.info(f"batch size: \t\t\t\t {args.batch_size}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"architecture episodes: \t\t\t {args.episodes}")
    logger.info(f"using multi gpus: \t\t\t {args.multi_gpu}")
    logger.info(f"architecture space: ")
    for name, value in ARCH_SPACE.items():
        logger.info(name + f": \t\t\t\t {value}")
    logger.info(f"quantization space: ")
    for name, value in QUAN_SPACE.items():
        logger.info(name + f": \t\t\t {value}")

    agent = Agent({
        **ARCH_SPACE,
        **QUAN_SPACE
    },
                  args.layers,
                  lr=args.learning_rate,
                  device=torch.device('cpu'),
                  skip=args.skip)

    train_data, val_data = data.get_data(args.dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         augment=args.augment)

    input_shape, num_classes = data.get_info(args.dataset)
    ## (3,32,32) -> (1,3,32,32) add batch dimension
    sample_input = utility.get_sample_input(device, input_shape)

    writer.writerow(["ID"] +
                    ["Layer {}".format(i)
                     for i in range(args.layers)] + ["Accuracy"] + [
                         "Partition (Tn, Tm)", "Partition (#LUTs)",
                         "Partition (#cycles)", "Total LUT", "Total Throughput"
                     ] + ["Time"])

    arch_id, total_time = 0, 0
    best_reward = float('-inf')

    logger.info('=' * 50 +
                "Start exploring architecture & quantization space" + '=' * 50)
    best_samples = BestSamples(5)

    for e in range(args.episodes):
        logger.info('-' * 130)
        arch_id += 1
        start = time.time()
        rollout, paras = agent.rollout()
        logger.info("Sample Architecture ID: {}, Sampled actions: {}".format(
            arch_id, rollout))
        arch_paras, quan_paras = utility.split_paras(paras)

        fpga_model = FPGAModel(rLUT=args.rLUT,
                               rThroughput=args.rThroughput,
                               arch_paras=arch_paras,
                               quan_paras=quan_paras)

        if fpga_model.validate():

            model, optimizer = child.get_model(input_shape,
                                               arch_paras,
                                               num_classes,
                                               device,
                                               multi_gpu=args.multi_gpu,
                                               do_bn=args.batchnorm)

            if args.verbosity > 1:
                print(model)
                torchsummary.summary(model, input_shape)

            if args.adapt:
                num_w = utility.get_net_param(model)
                macs = utility.get_net_macs(model, sample_input)
                tb_writer.add_scalar('num_param', num_w, arch_id)
                tb_writer.add_scalar('macs', macs, arch_id)
                if args.verbosity > 1:
                    print(f"# of param: {num_w}, macs: {macs}")

            _, val_acc = backend.fit(model,
                                     optimizer,
                                     train_data,
                                     val_data,
                                     quan_paras=quan_paras,
                                     epochs=args.epochs,
                                     verbosity=args.verbosity)
        else:
            val_acc = 0

        if args.adapt:
            ## TODO: how to make arch_reward function with macs and latency?
            arch_reward = val_acc
        else:
            arch_reward = val_acc

        agent.store_rollout(rollout, arch_reward)
        end = time.time()
        ep_time = end - start
        total_time += ep_time
        best_samples.register(arch_id, rollout, arch_reward)

        tb_writer.add_scalar('val_acc', val_acc, arch_id)
        tb_writer.add_scalar('arch_reward', arch_reward, arch_id)

        if arch_reward > best_reward:
            best_reward = arch_reward
            tb_writer.add_scalar('best_reward', best_reward, arch_id)
            tb_writer.add_graph(model.eval(), (sample_input, ))

        writer.writerow([arch_id] +
                        [str(paras[i])
                         for i in range(args.layers)] + [arch_reward] +
                        list(fpga_model.get_info()) + [ep_time])
        logger.info(f"Reward: {arch_reward}, " + f"Elasped time: {ep_time}, " +
                    f"Average time: {total_time/(e+1)}")
        logger.info(f"Best Reward: {best_samples.reward_list[0]}, " +
                    f"ID: {best_samples.id_list[0]}, " +
                    f"Rollout: {best_samples.rollout_list[0]}")
    logger.info('=' * 50 +
                "Architecture & quantization space exploration finished" +
                '=' * 50)
    logger.info(f"Total elapsed time: {total_time}")
    logger.info(f"Best samples: {best_samples}")
    tb_writer.close()
    csvfile.close()
Example 16
File: main.py Project: ND-SCL/NAQS
def nas(device, dir='experiment'):
    filepath = os.path.join(dir, f"nas ({args.episodes} episodes)")
    logger = get_logger(filepath)
    csvfile = open(filepath + '.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    logger.info(f"INFORMATION")
    logger.info(f"mode: \t\t\t\t\t {'nas'}")
    logger.info(f"dataset: \t\t\t\t {args.dataset}")
    logger.info(f"number of child network layers: \t {args.layers}")
    logger.info(f"include stride: \t\t\t {not args.no_stride}")
    logger.info(f"include pooling: \t\t\t {not args.no_pooling}")
    logger.info(f"skip connection: \t\t\t {args.skip}")
    logger.info(f"training epochs: \t\t\t {args.epochs}")
    logger.info(f"data augmentation: \t\t\t {args.augment}")
    logger.info(f"batch size: \t\t\t\t {args.batch_size}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"architecture episodes: \t\t\t {args.episodes}")
    logger.info(f"using multi gpus: \t\t\t {args.multi_gpu}")
    logger.info(f"architecture space: ")
    for name, value in ARCH_SPACE.items():
        logger.info(name + f": \t\t\t\t {value}")
    agent = Agent(ARCH_SPACE,
                  args.layers,
                  lr=args.learning_rate,
                  device=torch.device('cpu'),
                  skip=args.skip)
    train_data, val_data = data.get_data(args.dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         augment=args.augment)
    input_shape, num_classes = data.get_info(args.dataset)
    writer.writerow(["ID"] +
                    ["Layer {}".format(i)
                     for i in range(args.layers)] + ["Accuracy", "Time"])
    arch_id, total_time = 0, 0
    logger.info('=' * 50 + "Start exploring architecture space" + '=' * 50)
    logger.info('-' * len("Start exploring architecture space"))
    best_samples = BestSamples(5)
    for e in range(args.episodes):
        arch_id += 1
        start = time.time()
        arch_rollout, arch_paras = agent.rollout()
        logger.info("Sample Architecture ID: {}, Sampled actions: {}".format(
            arch_id, arch_rollout))
        model, optimizer = child.get_model(input_shape,
                                           arch_paras,
                                           num_classes,
                                           device,
                                           multi_gpu=args.multi_gpu,
                                           do_bn=True)
        _, arch_reward = backend.fit(model,
                                     optimizer,
                                     train_data,
                                     val_data,
                                     epochs=args.epochs,
                                     verbosity=args.verbosity)
        agent.store_rollout(arch_rollout, arch_reward)
        end = time.time()
        ep_time = end - start
        total_time += ep_time
        best_samples.register(arch_id, arch_rollout, arch_reward)
        writer.writerow([arch_id] +
                        [str(arch_paras[i]) for i in range(args.layers)] +
                        [arch_reward] + [ep_time])
        logger.info(f"Architecture Reward: {arch_reward}, " +
                    f"Elasped time: {ep_time}, " +
                    f"Average time: {total_time/(e+1)}")
        logger.info(f"Best Reward: {best_samples.reward_list[0]}, " +
                    f"ID: {best_samples.id_list[0]}, " +
                    f"Rollout: {best_samples.rollout_list[0]}")
        logger.info('-' * len("Start exploring architecture space"))
    logger.info('=' * 50 + "Architecture sapce exploration finished" +
                '=' * 50)
    logger.info(f"Total elasped time: {total_time}")
    logger.info(f"Best samples: {best_samples}")
    csvfile.close()
Example 17
def tune(paras=None, dataset='CIFAR10'):
    if paras is None:  # avoid the mutable-default-argument pitfall
        paras = []
    # quantize = True if 'act_num_int_bits' in paras[0] else False
    arch_paras, quan_paras = utility.split_paras(paras)
    input_shape, num_classes = data.get_info(dataset)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_data, val_data = data.get_data(
        name=dataset, device=device,
        shuffle=True, batch_size=args.batch_size, augment=args.augment)
    model, _ = get_model(
        input_shape, arch_paras, num_classes,
        device=device,
        multi_gpu=args.multi_gpu,
        do_bn=args.do_bn)
    optimizer, lr_schedule = get_optimizer(args.optimizer, model)
    best_acc = 0
    best_quan_acc = 0
    csvfile = open('tune.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    writer.writerow(['Epoch', 'train acc', 'val acc', 'quan acc'])
    for epoch in range(1, args.epochs+1):
        # print('before training ', model.conv_1.bias, model.conv_2.bias)
        epoch_lr = lr_schedule(optimizer, epoch)
        print('-' * 80)
        print(f"Epoch {epoch} \t LR: {epoch_lr}" +
              f"\t Best Acc: {best_acc:6.3%}" +
              (f"\t quantized: {best_quan_acc:6.3%}" if quan_paras is not None
               else ''))
        print("Training ...")
        running_loss, running_correction, num_batches = 0, 0, 0
        running_total = 0
        bar_width = 30
        model.train()
        start = time.time()
        for input_batch, label_batch in train_data:
            batch_loss, batch_correction = backend.batch_fit(
                model, input_batch, label_batch, optimizer)
            end = time.time()
            running_loss += batch_loss
            running_correction += batch_correction
            num_batches += 1
            running_total += input_batch.size(0)
            train_acc = running_correction / running_total
            train_loss = running_loss / running_total
            epoch_percentage = num_batches / len(train_data)
            print('|' + '='*(math.ceil(bar_width * epoch_percentage)-1) +
                  '>' +
                  ' '*(bar_width - math.ceil(bar_width * epoch_percentage)) +
                  '|' + f"{epoch_percentage:4.1%}-{end-start:4.2f}s" +
                  f"\t loss: {train_loss:.5}, acc: {train_acc:6.3%}  ",
                  end=('\r' if epoch_percentage < 1 else '\n'))
        # print('after training ', model.conv_1.bias, model.conv_2.bias)
        print("Training finished, start evaluating ...")
        model.eval()
        running_loss, running_correction, num_batches = 0, 0, 0
        running_total = 0
        start = time.time()
        for input_batch, label_batch in val_data:
            with torch.no_grad():
                batch_loss, batch_correction = backend.batch_fit(
                    model, input_batch, label_batch)
                end = time.time()
                running_loss += batch_loss
                running_correction += batch_correction
                num_batches += 1
                running_total += input_batch.size(0)
                val_acc = running_correction / running_total
                val_loss = running_loss / running_total
                epoch_percentage = num_batches / len(val_data)
            print('|' + '='*(math.ceil(bar_width * epoch_percentage)-1) +
                  '>' +
                  ' '*(bar_width - math.ceil(bar_width * epoch_percentage)) +
                  '|' + f"{epoch_percentage:4.1%}-{end-start:4.2f}s" +
                  f"\t loss: {val_loss:.5}, acc: {val_acc:6.3%}  ",
                  end=('\r' if epoch_percentage < 1 else '\n'))
        if val_acc > best_acc:
            best_acc = val_acc
        quan_acc = 'N/A'
        if quan_paras is not None:
            print("Start evaluating with quantization ...")
            running_loss, running_correction, num_batches = 0, 0, 0
            running_total = 0
            start = time.time()
            for input_batch, label_batch in val_data:
                with torch.no_grad():
                    batch_loss, batch_correction = backend.batch_fit(
                        model, input_batch, label_batch, quan_paras=quan_paras)
                    end = time.time()
                    running_loss += batch_loss
                    running_correction += batch_correction
                    num_batches += 1
                    running_total += input_batch.size(0)
                    quan_acc = running_correction / running_total
                    quan_loss = running_loss / running_total
                    epoch_percentage = num_batches / len(val_data)
                print('|' + '='*(math.ceil(bar_width * epoch_percentage)-1) +
                      '>' + ' '*(bar_width - math.ceil(
                        bar_width * epoch_percentage)) +
                      '|' + f"{epoch_percentage:4.1%}-{end-start:4.2f}s" +
                      f"\t loss: {quan_loss:.5}, acc: {quan_acc:6.3%}  ",
                      end=('\r' if epoch_percentage < 1 else '\n'))
            if quan_acc > best_quan_acc:
                best_quan_acc = quan_acc
        writer.writerow([str(epoch), train_acc, val_acc, quan_acc])
    print(f"Finished tuning ... final accuracy: {best_acc:6.3%}, " +
          f"quantized accuracy :{best_quan_acc:6.3%}")
    para_num, para_size = compute_parameter_number(model.graph, quan_paras)
    print(f"Total number of parameters: {para_num}")
    print(f"Total parameter size: {para_size if para_size > 0 else 'N/A'}")
Example 18
File: main.py Project: ND-SCL/NAQS
def nested_search(device, dir='experiment'):
    dir = os.path.join(dir,
                       f"rLut={args.rLUT}, rThroughput={args.rThroughput}")
    if os.path.exists(dir) is False:
        os.makedirs(dir)
    filepath = os.path.join(dir, f"nested ({args.episodes} episodes)")
    logger = get_logger(filepath)
    csvfile = open(filepath + '.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    logger.info(f"INFORMATION")
    logger.info(f"mode: \t\t\t\t\t {'nested'}")
    logger.info(f"dataset: \t\t\t\t {args.dataset}")
    logger.info(f"number of child network layers: \t {args.layers}")
    logger.info(f"include stride: \t\t\t {not args.no_stride}")
    logger.info(f"include pooling: \t\t\t {not args.no_pooling}")
    logger.info(f"skip connection: \t\t\t {args.skip}")
    logger.info(f"required # LUTs: \t\t\t {args.rLUT}")
    logger.info(f"required throughput: \t\t\t {args.rThroughput}")
    logger.info(f"Assumed frequency: \t\t\t {CLOCK_FREQUENCY}")
    logger.info(f"training epochs: \t\t\t {args.epochs}")
    logger.info(f"data augmentation: \t\t\t {args.augment}")
    logger.info(f"batch size: \t\t\t\t {args.batch_size}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"architecture episodes: \t\t\t {args.episodes1}")
    logger.info(f"quantization episodes: \t\t\t {args.episodes2}")
    logger.info(f"using multi gpus: \t\t\t {args.multi_gpu}")
    logger.info(f"architecture space: ")
    for name, value in ARCH_SPACE.items():
        logger.info(name + f": \t\t\t\t {value}")
    logger.info(f"quantization space: ")
    for name, value in QUAN_SPACE.items():
        logger.info(name + f": \t\t\t {value}")
    train_data, val_data = data.get_data(args.dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         augment=args.augment)
    input_shape, num_classes = data.get_info(args.dataset)
    writer.writerow(["ID"] +
                    ["Layer {}".format(i)
                     for i in range(args.layers)] + ["Accuracy"] + [
                         "Partition (Tn, Tm)", "Partition (#LUTs)",
                         "Partition (#cycles)", "Total LUT", "Total Throughput"
                     ] + ["Time"])
    arch_agent = Agent(ARCH_SPACE,
                       args.layers,
                       lr=args.learning_rate,
                       device=torch.device('cpu'),
                       skip=args.skip)
    arch_id, total_time = 0, 0
    logger.info('=' * 50 + "Start exploring architecture space" + '=' * 50)
    best_arch = BestSamples(5)
    for e1 in range(args.episodes1):
        logger.info('-' * 130)
        arch_id += 1
        start = time.time()
        arch_rollout, arch_paras = arch_agent.rollout()
        logger.info("Sample Architecture ID: {}, Sampled arch: {}".format(
            arch_id, arch_rollout))
        model, optimizer = child.get_model(input_shape,
                                           arch_paras,
                                           num_classes,
                                           device,
                                           multi_gpu=args.multi_gpu,
                                           do_bn=False)
        backend.fit(model,
                    optimizer,
                    train_data,
                    val_data,
                    epochs=args.epochs,
                    verbosity=args.verbosity)
        quan_id = 0
        best_quan_reward = -1
        logger.info('=' * 50 + "Start exploring quantization space" + '=' * 50)
        quan_agent = Agent(QUAN_SPACE,
                           args.layers,
                           lr=args.learning_rate,
                           device=torch.device('cpu'),
                           skip=False)
        for e2 in range(args.episodes2):
            quan_id += 1
            quan_rollout, quan_paras = quan_agent.rollout()
            fpga_model = FPGAModel(rLUT=args.rLUT,
                                   rThroughput=args.rThroughput,
                                   arch_paras=arch_paras,
                                   quan_paras=quan_paras)
            if fpga_model.validate():
                _, quan_reward = backend.fit(model,
                                             optimizer,
                                             val_data=val_data,
                                             quan_paras=quan_paras,
                                             epochs=1,
                                             verbosity=args.verbosity)
            else:
                quan_reward = 0
            logger.info(
                "Sample Quantization ID: {}, Sampled Quantization: {}, reward: {}"
                .format(quan_id, quan_rollout, quan_reward))
            quan_agent.store_rollout(quan_rollout, quan_reward)
            if quan_reward > best_quan_reward:
                best_quan_reward = quan_reward
                best_quan_rollout, best_quan_paras = quan_rollout, quan_paras
        logger.info('=' * 50 + "Quantization space exploration finished" +
                    '=' * 50)
        arch_reward = best_quan_reward
        arch_agent.store_rollout(arch_rollout, arch_reward)
        end = time.time()
        ep_time = end - start
        total_time += ep_time
        best_arch.register(
            arch_id,
            utility.combine_rollout(arch_rollout, best_quan_rollout,
                                    args.layers), arch_reward)
        writer.writerow([arch_id] + [
            str(arch_paras[i]) + '\n' + str(best_quan_paras[i])
            for i in range(args.layers)
        ] + [arch_reward] + list(fpga_model.get_info()) + [ep_time])
        logger.info(f"Reward: {arch_reward}, " + f"Elasped time: {ep_time}, " +
                    f"Average time: {total_time/(e1+1)}")
        logger.info(f"Best Reward: {best_arch.reward_list[0]}, " +
                    f"ID: {best_arch.id_list[0]}, " +
                    f"Rollout: {best_arch.rollout_list[0]}")
    logger.info('=' * 50 +
                "Architecture & quantization space exploration finished" +
                '=' * 50)
    logger.info(f"Total elapsed time: {total_time}")
    logger.info(f"Best samples: {best_arch}")
    csvfile.close()
Example 19
File: main.py Project: ND-SCL/NAQS
def quantization_search(device, dir='experiment'):
    dir = os.path.join(dir,
                       f"rLut={args.rLUT}, rThroughput={args.rThroughput}")
    if os.path.exists(dir) is False:
        os.makedirs(dir)
    filepath = os.path.join(dir, f"quantization ({args.episodes} episodes)")
    logger = get_logger(filepath)
    csvfile = open(filepath + '.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    logger.info(f"INFORMATION")
    logger.info(f"mode: \t\t\t\t\t {'quantization'}")
    logger.info(f"dataset: \t\t\t\t {args.dataset}")
    logger.info(f"number of child network layers: \t {args.layers}")
    logger.info(f"include stride: \t\t\t {not args.no_stride}")
    logger.info(f"include pooling: \t\t\t {not args.no_pooling}")
    logger.info(f"skip connection: \t\t\t {args.skip}")
    logger.info(f"required # LUTs: \t\t\t {args.rLUT}")
    logger.info(f"required throughput: \t\t\t {args.rThroughput}")
    logger.info(f"Assumed frequency: \t\t\t {CLOCK_FREQUENCY}")
    logger.info(f"training epochs: \t\t\t {args.epochs}")
    logger.info(f"data augmentation: \t\t\t {args.augment}")
    logger.info(f"batch size: \t\t\t\t {args.batch_size}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"architecture episodes: \t\t\t {args.episodes}")
    logger.info(f"using multi gpus: \t\t\t {args.multi_gpu}")
    logger.info(f"architecture space: ")
    # for name, value in ARCH_SPACE.items():
    #     logger.info(name + f": \t\t\t\t {value}")
    logger.info(f"quantization space: ")
    for name, value in QUAN_SPACE.items():
        logger.info(name + f": \t\t\t {value}")
    agent = Agent(QUAN_SPACE,
                  args.layers,
                  lr=args.learning_rate,
                  device=torch.device('cpu'),
                  skip=False)
    train_data, val_data = data.get_data(args.dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         augment=args.augment)
    input_shape, num_classes = data.get_info(args.dataset)
    writer.writerow(["ID"] +
                    ["Layer {}".format(i)
                     for i in range(args.layers)] + ["Accuracy"] + [
                         "Partition (Tn, Tm)", "Partition (#LUTs)",
                         "Partition (#cycles)", "Total LUT", "Total Throughput"
                     ] + ["Time"])
    child_id, total_time = 0, 0
    logger.info('=' * 50 + "Start exploring quantization space" + '=' * 50)
    best_samples = BestSamples(5)
    arch_paras = [{
        'filter_height': 3,
        'filter_width': 3,
        'stride_height': 1,
        'stride_width': 1,
        'num_filters': 64,
        'pool_size': 1
    }, {
        'filter_height': 7,
        'filter_width': 5,
        'stride_height': 1,
        'stride_width': 1,
        'num_filters': 48,
        'pool_size': 1
    }, {
        'filter_height': 5,
        'filter_width': 5,
        'stride_height': 2,
        'stride_width': 1,
        'num_filters': 48,
        'pool_size': 1
    }, {
        'filter_height': 3,
        'filter_width': 5,
        'stride_height': 1,
        'stride_width': 1,
        'num_filters': 64,
        'pool_size': 1
    }, {
        'filter_height': 5,
        'filter_width': 7,
        'stride_height': 1,
        'stride_width': 1,
        'num_filters': 36,
        'pool_size': 1
    }, {
        'filter_height': 3,
        'filter_width': 1,
        'stride_height': 1,
        'stride_width': 2,
        'num_filters': 64,
        'pool_size': 2
    }]
    model, optimizer = child.get_model(input_shape,
                                       arch_paras,
                                       num_classes,
                                       device,
                                       multi_gpu=args.multi_gpu,
                                       do_bn=False)
    _, val_acc = backend.fit(model,
                             optimizer,
                             train_data=train_data,
                             val_data=val_data,
                             epochs=args.epochs,
                             verbosity=args.verbosity)
    print(val_acc)
    for e in range(args.episodes):
        logger.info('-' * 130)
        child_id += 1
        start = time.time()
        quan_rollout, quan_paras = agent.rollout()
        logger.info("Sample Quantization ID: {}, Sampled actions: {}".format(
            child_id, quan_rollout))
        fpga_model = FPGAModel(rLUT=args.rLUT,
                               rThroughput=args.rThroughput,
                               arch_paras=arch_paras,
                               quan_paras=quan_paras)
        if fpga_model.validate():
            _, reward = backend.fit(model,
                                    optimizer,
                                    val_data=val_data,
                                    quan_paras=quan_paras,
                                    epochs=1,
                                    verbosity=args.verbosity)
        else:
            reward = 0
        agent.store_rollout(quan_rollout, reward)
        end = time.time()
        ep_time = end - start
        total_time += ep_time
        best_samples.register(child_id, quan_rollout, reward)
        writer.writerow([child_id] +
                        [str(quan_paras[i]) for i in range(args.layers)] +
                        [reward] + list(fpga_model.get_info()) + [ep_time])
        logger.info(f"Reward: {reward}, " + f"Elasped time: {ep_time}, " +
                    f"Average time: {total_time/(e+1)}")
        logger.info(f"Best Reward: {best_samples.reward_list[0]}, " +
                    f"ID: {best_samples.id_list[0]}, " +
                    f"Rollout: {best_samples.rollout_list[0]}")
    logger.info('=' * 50 + "Quantization sapce exploration finished" +
                '=' * 50)
    logger.info(f"Total elasped time: {total_time}")
    logger.info(f"Best samples: {best_samples}")
    csvfile.close()
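Example 20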
def nas(device, dir='experiment'):
    filepath = os.path.join(dir,
                            utility.cleanText(f"nas_{args.episodes}-episodes"))
    logger = utility.get_logger(filepath)
    csvfile = open(filepath + '.csv', mode='w+', newline='')
    writer = csv.writer(csvfile)
    tb_writer = SummaryWriter(filepath)
    logger.info(f"INFORMATION")
    logger.info(f"mode: \t\t\t\t\t {'nas'}")
    logger.info(f"dataset: \t\t\t\t {args.dataset}")
    logger.info(f"seed: \t\t\t\t {args.seed}")
    logger.info(f"gpu: \t\t\t\t {args.gpu}")
    logger.info(f"number of child network layers: \t {args.layers}")
    logger.info(f"include batchnorm: \t\t\t {args.batchnorm}")
    logger.info(f"include stride: \t\t\t {not args.no_stride}")
    logger.info(f"include pooling: \t\t\t {not args.no_pooling}")
    logger.info(f"skip connection: \t\t\t {args.skip}")
    logger.info(f"training epochs: \t\t\t {args.epochs}")
    logger.info(f"data augmentation: \t\t\t {args.augment}")
    logger.info(f"batch size: \t\t\t\t {args.batch_size}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"controller learning rate: \t\t {args.learning_rate}")
    logger.info(f"architecture episodes: \t\t\t {args.episodes}")
    logger.info(f"using multi gpus: \t\t\t {args.multi_gpu}")
    logger.info(f"architecture space: ")
    for name, value in ARCH_SPACE.items():
        logger.info(name + f": \t\t\t\t {value}")

    agent = Agent(ARCH_SPACE,
                  args.layers,
                  lr=args.learning_rate,
                  device=torch.device('cpu'),
                  skip=args.skip)
    train_data, val_data = data.get_data(args.dataset,
                                         device,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         augment=args.augment)

    input_shape, num_classes = data.get_info(args.dataset)
    ## (3,32,32) -> (1,3,32,32) add batch dimension
    sample_input = utility.get_sample_input(device, input_shape)

    ## write header
    if args.adapt:
        writer.writerow(["ID"] +
                        ["Layer {}".format(i) for i in range(args.layers)] +
                        ["Accuracy", "Time", "params", "macs", "reward"])

    else:
        writer.writerow(["ID"] +
                        ["Layer {}".format(i)
                         for i in range(args.layers)] + ["Accuracy", "Time"])

    arch_id, total_time = 0, 0
    best_reward = float('-inf')
    logger.info('=' * 50 + "Start exploring architecture space" + '=' * 50)
    logger.info('-' * len("Start exploring architecture space"))
    best_samples = BestSamples(5)

    for e in range(args.episodes):
        arch_id += 1
        start = time.time()
        arch_rollout, arch_paras = agent.rollout()
        logger.info("Sample Architecture ID: {}, Sampled actions: {}".format(
            arch_id, arch_rollout))
        ## get model
        model, optimizer = child.get_model(input_shape,
                                           arch_paras,
                                           num_classes,
                                           device,
                                           multi_gpu=args.multi_gpu,
                                           do_bn=args.batchnorm)

        if args.verbosity > 1:
            print(model)
            torchsummary.summary(model, input_shape)

        if args.adapt:
            num_w = utility.get_net_param(model)
            macs = utility.get_net_macs(model, sample_input)
            tb_writer.add_scalar('num_param', num_w, arch_id)
            tb_writer.add_scalar('macs', macs, arch_id)
            if args.verbosity > 1:
                print(f"# of param: {num_w}, macs: {macs}")

        ## train model and get val_acc
        _, val_acc = backend.fit(model,
                                 optimizer,
                                 train_data,
                                 val_data,
                                 epochs=args.epochs,
                                 verbosity=args.verbosity)

        if args.adapt:
            ## TODO: how to model arch_reward?? with num_w and macs?
            arch_reward = val_acc
        else:
            arch_reward = val_acc

        agent.store_rollout(arch_rollout, arch_reward)

        end = time.time()
        ep_time = end - start
        total_time += ep_time

        tb_writer.add_scalar('val_acc', val_acc, arch_id)
        tb_writer.add_scalar('arch_reward', arch_reward, arch_id)

        if arch_reward > best_reward:
            best_reward = arch_reward
            tb_writer.add_scalar('best_reward', best_reward, arch_id)
            tb_writer.add_graph(model.eval(), (sample_input, ))

        best_samples.register(arch_id, arch_rollout, arch_reward)
        if args.adapt:
            writer.writerow([arch_id] +
                            [str(arch_paras[i])
                             for i in range(args.layers)] + [val_acc] +
                            [ep_time] + [num_w] + [macs] + [arch_reward])
        else:
            writer.writerow([arch_id] +
                            [str(arch_paras[i]) for i in range(args.layers)] +
                            [val_acc] + [ep_time])
        logger.info(f"Architecture Reward: {arch_reward}, " +
                    f"Elasped time: {ep_time}, " +
                    f"Average time: {total_time/(e+1)}")
        logger.info(f"Best Reward: {best_samples.reward_list[0]}, " +
                    f"ID: {best_samples.id_list[0]}, " +
                    f"Rollout: {best_samples.rollout_list[0]}")

        logger.info('-' * len("Start exploring architecture space"))
    logger.info('=' * 50 + "Architecture sapce exploration finished" +
                '=' * 50)
    logger.info(f"Total elasped time: {total_time}")
    logger.info(f"Best samples: {best_samples}")
    tb_writer.close()
    csvfile.close()