示例#1
0
    def test_get_triangles_return_trianlge_with_arg_normals(self):
        """get_triangles() must honor per-vertex normal indices in faces."""
        vertices = [
            [0.0, 2.0, 2.0],
            [0.0, 0.0, 0.0],
            [2.0, 0.0, 0.0],
        ]
        normals = [
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
        ]
        faces = [
            [[1, 0, 1], [2, 0, 2], [3, 0, 2]],
            [[2, 0, 1], [1, 0, 1], [3, 0, 1]],
        ]
        model = Model(vertices=vertices, normals=normals, faces=faces)

        expected_triangles = [
            [
                [3.0, 4.0, 5.0],
                [0.0, 2.0, 2.0],
                [0.0, 0.0, 0.0],
                [2.0, 0.0, 0.0],
            ],
            [
                [1.0, 2.0, 3.0],
                [0.0, 0.0, 0.0],
                [0.0, 2.0, 2.0],
                [2.0, 0.0, 0.0],
            ],
        ]

        triangles = model.get_triangles()

        # str() comparison mirrors the original assertion semantics.
        self.assertEqual(str(triangles), str(expected_triangles))
示例#2
0
    def test_get_triangles_return_fan_triangles(self):
        """A quad face must be fan-triangulated into two triangles."""
        vertices = [
            [0.0, 2.0, 2.0],
            [0.0, 0.0, 0.0],
            [2.0, 0.0, 0.0],
            [2.0, 2.0, 0.0],
        ]
        faces = [
            [[1, 0, 0], [2, 0, 0], [3, 0, 0], [4, 0, 0]],
        ]
        model = Model(vertices=vertices, faces=faces)

        expected_triangles = [
            [
                [0.0, 0.0, 0.0],
                [0.0, 2.0, 2.0],
                [0.0, 0.0, 0.0],
                [2.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.0, 0.0],
                [0.0, 2.0, 2.0],
                [2.0, 0.0, 0.0],
                [2.0, 2.0, 0.0],
            ],
        ]

        triangles = model.get_triangles()

        # str() comparison mirrors the original assertion semantics.
        self.assertEqual(str(triangles), str(expected_triangles))
示例#3
0
 def setUp(self):
     """Create a fresh model plus two agents and look up the color groups."""
     self.model = Model(model_nm="Test model", grp_struct=DEF_GRP_STRUCT)
     self.exec_key = self.model.exec_key
     self.agent = Agent("Test agent", exec_key=self.model.exec_key)
     self.agent2 = Agent("Test agent 2", exec_key=self.model.exec_key)
     # Groups created by Model() are fetched back through the registry.
     self.blue_grp = get_agent(BLUE_GRP_NM, self.exec_key)
     self.red_grp = get_agent(RED_GRP_NM, self.exec_key)
示例#4
0
    def test_model_save_load_run_from_disk(self, dump, load):
        """Save the registry, reload it, run the restored model, and check
        that members of each color group carry the expected "value" attr.

        ``dump``/``load`` are mocks injected by the patch decorator.
        """
        DEF_GRP[GRP_ACTION] = self.complex_agent_action
        DEF_GRP[MBR_CREATOR] = self.complex_agent_create
        second_grp = DEF_GRP.copy()
        second_grp[COLOR] = RED
        grp_struct = {
            "COMPLEX_RED_GRP": second_grp,
            "COMPLEX_BLUE_GRP": DEF_GRP,
        }
        complex_model = Model(grp_struct=grp_struct, model_nm="Basic")
        complex_model.run(5)
        registry.save_reg(key=complex_model.exec_key)
        registry.load_reg(complex_model.exec_key)
        loaded_object = get_model(complex_model.exec_key)
        # isinstance / assertEqual are the idiomatic checks
        # (was: assertTrue(type(x) == Model) and assertTrue("Basic" == ...)).
        self.assertIsInstance(loaded_object, Model)
        self.assertEqual("Basic", loaded_object.module)
        all_red_members_have_attribute_5 = True
        all_blue_members_have_attribute_10 = True
        deserialized_model = loaded_object
        deserialized_model.run(5)
        for grp in deserialized_model.groups:
            for member in grp.members:
                if grp.color == BLUE:
                    # Blue members must NOT carry the red-group value.
                    all_blue_members_have_attribute_10 = \
                        all_blue_members_have_attribute_10 and (
                                grp[member].get_attr("value") != 5)
                else:
                    all_red_members_have_attribute_5 = \
                        all_red_members_have_attribute_5 and (
                                grp[member].get_attr("value") == 5)

        self.assertTrue(all_red_members_have_attribute_5)
        self.assertTrue(all_blue_members_have_attribute_10)
示例#5
0
    def __init__(self, protector):
        """Store *protector*, open the db model, set the fixed vendor/model
        identifiers, and start the notifier.
        """
        self.protector = protector
        self.db_model = Model()

        # This driver is hard-wired to the NASys UL2011 controller.
        self.vendor_name = 'nasys'
        self.model_name = 'ul2011'

        self.initialize_notifier()
示例#6
0
    def __init__(self, num_classes, learning_rate=0.01):
        """Build the classifier graph.

        :param num_classes: number of output classes
        :param learning_rate: optimizer step size (default 0.01)
        """
        self.num_classes = num_classes
        self.learning_rate = learning_rate

        self._create_placeholders()
        self._build_model()

        # Base-class init runs after the graph is built.
        # NOTE(review): confirm Model.__init__ depends on the built graph.
        Model.__init__(self)
示例#7
0
def main(opts):
    """Run head-pose estimation on a video and write an annotated mp4.

    Reads frames from ``opts.input``, detects faces, predicts pose with the
    model loaded from ``opts.weights``, and writes data/res.mp4.  Press Esc
    in the preview window to stop early.
    """
    cap = cv2.VideoCapture(opts.input)

    model = Model(66, opts.size)
    model.load(opts.weights)

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('data/res.mp4', fourcc, 10, (640, 480))

    while True:
        ok, img = cap.read()
        # Stop cleanly when the stream ends instead of crashing on None.
        if not ok or img is None:
            break

        # Face detection (HOG by default).
        inp_img = img[:, :, ::-1]  # BGR -> RGB for face_recognition
        face_locations = face_recognition.face_locations(inp_img)

        for (top, right, bottom, left) in face_locations:
            # Widen the bbox and shift it vertically.
            bbox_width = abs(bottom - top)
            bbox_height = abs(right - left)
            left -= int(2 * bbox_width / 4)
            right += int(2 * bbox_width / 4)
            top -= int(3 * bbox_height / 4)
            bottom += int(bbox_height / 4)

            # Clamp to image bounds.
            top = max(top, 0)
            left = max(left, 0)
            bottom = min(img.shape[0], bottom)
            right = min(img.shape[1], right)

            crop = img[top:bottom, left:right]

            # Resize by the shorter side, then center-crop.
            crop = resize_center_crop(crop, opts.size)

            # Normalize and predict pose.
            normed_img = normalize(crop)
            res = model.test_online([normed_img])

            # Draw pose axes and the face box.
            img = draw_axis(img,
                            *res,
                            tdx=(left + right) / 2,
                            tdy=(top + bottom) / 2,
                            size=100)
            cv2.rectangle(img, (left, top), (right, bottom), (255, 0, 0), 1)

        out.write(img)
        cv2.imshow('img', img)
        if cv2.waitKey(1) == 27:  # Esc
            break
    cap.release()  # release the capture device as well as the writer
    out.release()
def test_model():
    """Smoke-test the feature extractor on a bundled sample image."""
    # os.path.join is the portable way to build the fixture path
    # (equivalent to the former getcwd() + "/..." concatenation).
    sample_image_path = os.path.join(
        os.getcwd(),
        "app/frontend/build/assets/semsearch/datasets/fashion200/0.jpg")
    efficientnet_model = Model()

    image_array = image_to_np_array(sample_image_path,
                                    efficientnet_model.image_size)
    features = efficientnet_model.get_features(np.asarray([image_array]))
    # Expected feature map: batch x 7 x 7 x 1280.
    assert features.shape == (1, 7, 7, 1280)
示例#9
0
 def start(self, model_checker):
     """Start the base model and register three send_packet actions."""
     Model.start(self, model_checker)
     endpoints = [
         ((0x00, 0x00, 0x00, 0x00, 0x01, 0x00), "128.0.0.11"),
         ((0x00, 0x00, 0x00, 0x00, 0x01, 0x01), "128.0.0.12"),
         ((0x00, 0x00, 0x00, 0x01, 0x01, 0x01), "128.0.0.13"),
     ]
     for mac, ip in endpoints:
         self.clients[0].enableAction("send_packet", (mac, ip))
示例#10
0
 def start(self, model_checker):
     """Start the base model and register three send_packet actions."""
     Model.start(self, model_checker)
     endpoints = (
         ((0x00, 0x00, 0x00, 0x00, 0x01, 0x00), "128.0.0.11"),
         ((0x00, 0x00, 0x00, 0x00, 0x01, 0x01), "128.0.0.12"),
         ((0x00, 0x00, 0x00, 0x01, 0x01, 0x01), "128.0.0.13"),
     )
     client = self.clients[0]
     for mac, ip in endpoints:
         client.enableAction("send_packet", (mac, ip))
示例#11
0
文件: __init__.py 项目: mlnd/tcsl
def runTests(X_test, X_train, y_test, y_train):
    """Train every configured classifier (vanilla and grid-searched).

    Returns parallel lists: classifier names, training times, prediction
    times, train F1 scores, test F1 scores, and grid-search times.
    """
    classifiers, train_times, gs_times = [], [], []
    pred_times, f1_trains, f1_tests = [], [], []

    for classifier, parameters, gs_params in CLASSIFIERS:
        print(classifier, parameters, gs_params)
        this_model = Model(classifier, parameters, gs_params)
        this_model(X_train, y_train, X_test, y_test)

        name = this_model.classifier.__name__

        # Vanilla (non-optimized) run; no grid-search time applies.
        classifiers.append(name)
        train_times.append(this_model.training_time)
        pred_times.append(this_model.train_prediction_time)
        f1_trains.append(this_model.f1_train)
        f1_tests.append(this_model.f1_test)
        gs_times.append(0)

        # Grid-search-optimized run.
        classifiers.append(name + ' (Optimized)')
        train_times.append(this_model.optimal_training_time)
        pred_times.append(this_model.optimal_train_prediction_time)
        f1_trains.append(this_model.f1_optimal_train)
        f1_tests.append(this_model.f1_optimal_test)
        gs_times.append(this_model.gs_time)

    return classifiers, train_times, pred_times, f1_trains, f1_tests, gs_times
示例#12
0
 def test_get_model(self):
     """
     Register a model and fetch it back.
     """
     self.model = Model(exec_key=self.exec_key)
     reg_model(self.model, self.exec_key)
     # get_model() must return the exact object registered under this key.
     self.assertEqual(self.model, get_model(self.exec_key))
示例#13
0
    def parse(self, path):
        """Parse the .obj-style file at *path* into a Model.

        Recognizes vertex, normal, texture and face records; any other
        line type is ignored.
        """
        self.vertices = []
        self.faces = []
        self.normals = []
        self.textures = []

        # Record type -> (factory method, destination list).
        handlers = {
            ObjParsingStrategy.VERTEX_TYPE:
                (self._create_vertex_from_line_parts, self.vertices),
            ObjParsingStrategy.NORMAL_TYPE:
                (self._create_normal_from_line_parts, self.normals),
            ObjParsingStrategy.TEXTURE_TYPE:
                (self._create_texture_from_line_parts, self.textures),
            ObjParsingStrategy.FACE_TYPE:
                (self._create_face_from_line_parts, self.faces),
        }

        with open(path, 'r') as file:
            for line in file:
                parts = line.strip().split(' ')
                handler = handlers.get(parts[0])
                if handler is not None:
                    factory, bucket = handler
                    bucket.append(factory(parts))

        return Model(vertices=self.vertices,
                     textures=self.textures,
                     normals=self.normals,
                     faces=self.faces)
    def test_parse_should_return_with_parser_strategy_returns(self):
        """parse() must return whatever the parsing strategy produced."""
        path = 'dummy/path'
        expected = Model([], [])
        self._parsing_strategy_mock.parse.return_value = expected

        result = self._parser.parse(path)

        self.assertEqual(result, expected)
示例#15
0
def train(opts):
    """Train the model with L1 loss, validating after every epoch.

    Checkpoints are written to ``opts.output`` every ``opts.save_every``
    epochs.  ``opts`` must provide: data, bs, lr, epoch, size, output,
    save_every.
    """
    # Select device
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Define model
    model = Model().to(device)

    # Define dataloaders
    train_loader, val_loader = split_trainval(opts.data, opts.bs)

    # Define loss
    loss_criter = nn.L1Loss().to(device)

    # Define optimizer; LR is decayed 10x once at mid-training.
    optimizer = Adam(model.parameters(), lr=opts.lr, weight_decay=1e-6)
    scheduler = StepLR(optimizer, step_size=int(opts.epoch/2), gamma=0.1)

    # Training loop
    for epoch in range(opts.epoch):
        # Train cycle
        running_loss = 0.0
        model.train()

        for batch_num, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            loss = loss_criter(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Weight by batch size so the epoch average is per-sample.
            running_loss += loss.item() * inputs.size(0)

            print(f'epoch num {epoch:02d} batch num {batch_num:04d} train loss {loss:02.04f}', end='\r')

        epoch_loss = running_loss / len(train_loader.dataset)

        # Val cycle (no gradients)
        running_loss = 0.0
        model.eval()
        for inputs, labels in val_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                outputs = model(inputs)
                loss = loss_criter(outputs, labels)
            running_loss += loss.item() * inputs.size(0)
        epoch_val_loss = running_loss / len(val_loader.dataset)
        print(f'\n\nepoch num {epoch:02d} train loss {epoch_loss:02.04f} val loss {epoch_val_loss:02.04f}')

        scheduler.step()
        if (epoch + 1) % opts.save_every == 0:
            torch.save(model.state_dict(), os.path.join(opts.output, f'checkpoint_size{opts.size}_e{epoch+1}of{opts.epoch}_lr{opts.lr:.01E}.pth'))
示例#16
0
    def __json_to_object(self, serial_obj, exec_key):
        """
        Takes a serial JSON object back into a live Python object.

        Each entry that is a dict carrying a "type" tag is reconstructed
        through the matching class constructor; anything else is copied
        as-is.  Every restored value is also written into
        self.registries[exec_key].  After the loop, a restored Model is
        re-linked to its restored groups and env.
        """
        restored_obj = dict()
        restored_groups = []
        model_deserialized = False
        for obj_name in serial_obj:
            # Only dict payloads with a "type" tag are reconstructed.
            should_restore_object = isinstance(serial_obj[obj_name],
                                               dict) and "type" in serial_obj[
                                        obj_name]
            if should_restore_object:
                # NOTE(review): the first three checks are separate `if`s,
                # so the elif chain below binds only to the "Agent" check.
                # Behavior is correct because the tags are mutually
                # exclusive, but confirm before reordering branches.
                if serial_obj[obj_name]["type"] == "TestUser":
                    restored_obj[obj_name] = TermUser(name=obj_name,
                                                      serial_obj=serial_obj[
                                                          obj_name],
                                                      exec_key=exec_key)
                if serial_obj[obj_name]["type"] == "APIUser":
                    restored_obj[obj_name] = APIUser(name=obj_name,
                                                     serial_obj=serial_obj[
                                                         obj_name],
                                                     exec_key=exec_key)
                if serial_obj[obj_name]["type"] == "Agent":
                    restored_obj[obj_name] = Agent(name=obj_name,
                                                   serial_obj=serial_obj[
                                                       obj_name],
                                                   exec_key=exec_key)
                elif serial_obj[obj_name]["type"] == "Model":
                    # Imported locally, presumably to avoid an import cycle.
                    from lib.model import Model
                    print(f'restoring model for key {exec_key}')
                    restored_obj[obj_name] = Model(exec_key=exec_key,
                                                   serial_obj=serial_obj[
                                                       obj_name])
                    model_deserialized = True
                elif serial_obj[obj_name]["type"] == "Group":
                    from lib.group import Group
                    restored_obj[obj_name] = Group(exec_key=exec_key,
                                                   serial_obj=serial_obj[
                                                       obj_name],
                                                   name=serial_obj[obj_name][
                                                       'name'])
                    restored_groups.append(restored_obj[obj_name])
                elif serial_obj[obj_name]["type"] == "Env":
                    restored_obj[obj_name] = Env(exec_key=exec_key,
                                                 serial_obj=serial_obj[
                                                     obj_name],
                                                 name=serial_obj[obj_name][
                                                     'name'])
            else:
                # Plain value: keep it unchanged.
                restored_obj[obj_name] = serial_obj[obj_name]

            self.registries[exec_key][obj_name] = restored_obj[obj_name]

        if model_deserialized:
            # Re-attach the restored groups/env to the restored model.
            restored_obj['model'].groups = restored_groups
            restored_obj['model'].env = restored_obj['env']
            self.registries[exec_key]['model'] = restored_obj['model']
        return restored_obj
示例#17
0
class EncoderTrainer(ApplicationSession):
    """WAMP session that trains the encoder model on question batches."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.batch_size = 2
        self.needStop = False
        self.codec = Codec()
        self.model = Model('train')

    @asyncio.coroutine
    def train(self):
        """Training loop: pull batches from the db and fit until stopped."""
        self.publish('semanticaio.encoder.trainer.started')
        print('[emit] semanticaio.encoder.trainer.started')
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        input_dataset = np.zeros((self.batch_size, self.codec.seq_len, self.codec.n_chars), dtype=bool)
        output_dataset = np.zeros((self.batch_size, self.codec.seq_len, self.codec.n_chars), dtype=bool)
        while not self.needStop:
            yield from asyncio.sleep(0.1)
            batch = yield from self.call('semanticaio.db.batch.get', size=self.batch_size)
            for i, question in enumerate(batch):
                self.codec.encode(question['sentence'], input_dataset[i])
                # A question without a canonical form trains as identity.
                if question['correctFormId'] is None:
                    self.codec.encode(question['sentence'], output_dataset[i])
                else:
                    correct_question = yield from self.call('semanticaio.db.get', id=question['correctFormId'])
                    self.codec.encode(correct_question['sentence'], output_dataset[i])
            (loss, accuracy) = self.model.train(input_dataset, output_dataset)
            print('training:', loss, accuracy)

        self.needStop = False
        self.publish('semanticaio.encoder.trainer.stopped')
        print('[emit] semanticaio.encoder.trainer.stopped')

    def load(self, *args, **kwargs):
        """Load saved weights if present, then compile the model."""
        print('[call] semanticaio.encoder.trainer.load')
        try:
            self.model.load()
        except Exception:
            # Best-effort: missing/corrupt weights fall back to a fresh
            # compile (was a bare except, which also swallowed SystemExit).
            print('[error] semanticaio.encoder.trainer.load')
        self.model.compile()

    def save(self, *args, **kwargs):
        """Persist current model weights."""
        print('[call] semanticaio.encoder.trainer.save')
        self.model.save()

    @asyncio.coroutine
    def start(self, *args, **kwargs):
        """Event handler: run the training loop."""
        print('[event received] semanticaio.encoder.trainer.start')
        yield from self.train()

    def stop(self, *args, **kwargs):
        """Event handler: request the training loop to stop."""
        print('[event received] semanticaio.encoder.trainer.stop')
        self.needStop = True

    @asyncio.coroutine
    def onJoin(self, details):
        """Register RPCs and subscriptions once the session joins."""
        yield from self.register(self.load, 'semanticaio.encoder.trainer.load')
        yield from self.register(self.save, 'semanticaio.encoder.trainer.save')
        yield from self.subscribe(self.start, 'semanticaio.encoder.trainer.start')
        yield from self.subscribe(self.stop, 'semanticaio.encoder.trainer.stop')
        print('[encoder-trainer started]')
示例#18
0
 def setUp(self):
     """Create an exec key, sample agents/groups, and a model with its env."""
     self.exec_key = get_exec_key()
     self.newton = create_newton()
     self.calcs = create_calcguys(self.exec_key, [])
     self.cambs = create_cambguys(self.exec_key)
     self.pop_hist = PopHist()
     self.model = Model(exec_key=self.exec_key)
     self.env = self.model.env
     # Route env actions through the module-level env_action handler.
     self.env.action = env_action
示例#19
0
    def __init__(self,
                 max_length,
                 embedding_size,
                 num_hidden,
                 num_classes,
                 learning_rate=0.01,
                 sentences=None,
                 sequence_lengths=None):
        """Build the sentence-classifier graph inside its own variable scope.

        :param max_length: maximum sentence length (time steps)
        :param embedding_size: dimensionality of the embeddings
        :param num_hidden: number of hidden units
        :param num_classes: number of output classes
        :param learning_rate: optimizer step size (default 0.01)
        :param sentences: optional pre-supplied input tensor
        :param sequence_lengths: optional per-sentence length tensor
        """
        self.max_length = max_length
        self.embedding_size = embedding_size
        self.num_hidden = num_hidden
        self.num_classes = num_classes
        self.learning_rate = learning_rate

        # Scope all variables so multiple classifiers can coexist.
        with tf.variable_scope("SentenceClassifier"):
            self._create_placeholders()
            self._create_weights()
            self._build_model(sentences, sequence_lengths)

        # Base-class init runs after the graph is built.
        Model.__init__(self)
示例#20
0
def update_cache():
    """Rebuild the JSON device/command cache file from the database."""
    # Renamed from `json` to avoid shadowing the common module name.
    cache = {'command_names': {}}

    db_model = Model()

    # Command id -> display name.
    all_commands = db_model.get_commands()
    for cmd in all_commands:
        cache['command_names'][cmd['cmd_id']] = cmd['cmd_name']

    # Device names with their vendor/model.
    cache['devices'] = {}
    for dev in db_model.get_all_device_names():
        cache['devices'][dev['dev_name']] = {
            'vendor_name': dev['vendor_name'],
            'model_name': dev['model_name'],
        }

    # All models, their vendor, and their connection info.
    for row in db_model.get_models_w_cnx_info():
        vendor_name = row['vendor_name']
        model_name = row['model_name']

        # Aliases into the nested dictionaries, created on demand.
        vendor_dict = cache.setdefault(vendor_name, {})
        m_dict = vendor_dict.setdefault(model_name, {})

        # Connection info.
        m_dict['cnx'] = {
            'name': row['cnx_name'],
            'type': row['cnxt_name'],
            'server': row['cnx_server'],
            'port': row['cnx_port'],
            'username': row['cnx_username'],
            'msg_template': row['cnx_msg_template']
        }

        if row['cnxt_name'] == 'mqtt':
            m_dict['cnx']['tx_topic'] = row['cnx_mqtt_tx_topic']
            m_dict['cnx']['rx_topic'] = row['cnx_mqtt_rx_topic']

        # Generate this model's command table from its driver module.
        funcs = import_module(f'drivers.{vendor_name}.{model_name}.funcs')
        m_dict['commands'] = funcs.generate_cmd_table(all_commands)

    db_model.cleanup()
    print(cache_json_path)
    with open(cache_json_path, 'w') as fout:
        json_dump(cache, fout, indent=4)
示例#21
0
文件: net_train.py 项目: ht014/snedq
    def __init__(self,
                 data_processor,
                 bottleneck_dim=128,
                 num_codebooks=16,
                 hidden_dim=512,
                 decoder_layers=2,
                 encoder_layers=2,
                 **kwargs):
        """Build the encoder + quantizer pair and their shared optimizer.

        :param data_processor: supplies ``input_dim`` for the encoder
        :param bottleneck_dim: encoder output dimensionality
        :param hidden_dim: feedforward hidden width
        :param encoder_layers: number of encoder feedforward layers
        :param kwargs: forwarded to the Feedforward encoder stack
        """
        super().__init__()
        self.data_processor = data_processor
        # Encoder: feedforward stack followed by a linear bottleneck.
        self.encoder1 = nn.Sequential(
            Feedforward(self.data_processor.input_dim,
                        hidden_dim,
                        num_layers=encoder_layers,
                        **kwargs), nn.Linear(hidden_dim, bottleneck_dim))

        # NOTE(review): num_codebooks/decoder_layers from the signature are
        # NOT forwarded — the quantizer uses hard-coded values; confirm
        # intended.  Attribute name "quntizer" is a typo but may be
        # referenced elsewhere, so it is kept.
        self.quntizer = Model(input_dim=bottleneck_dim,
                              hidden_dim=1024,
                              bottleneck_dim=256,
                              encoder_layers=2,
                              decoder_layers=2,
                              Activation=nn.ReLU,
                              num_codebooks=8,
                              codebook_size=256,
                              initial_entropy=3.0,
                              share_codewords=True).cuda()
        self.distance = DISTANCES['euclidian_squared']
        self.triplet_delta = 5
        # One optimizer over both sub-modules' parameters.
        all_parameters = list(self.encoder1.parameters()) + list(
            self.quntizer.parameters())
        self.optimizer = OneCycleSchedule(QHAdam(all_parameters,
                                                 nus=(0.8, 0.7),
                                                 betas=(0.95, 0.998)),
                                          learning_rate_base=1e-3,
                                          warmup_steps=10000,
                                          decay_rate=0.2)
        self.experiment_path = 'logs'

        self.writer = SummaryWriter(self.experiment_path, comment='Cora')
示例#22
0
def main(opts):
    """Evaluate the pose model on the validation split, printing running
    mean absolute error and per-sample inference time."""
    model = Model(66, opts.size)
    model.model.summary()
    model.load(opts.weights)

    _train_list, val_list = split(opts.data)
    val_dataset = AFLW2000(val_list, batch_size=1, input_size=opts.size)

    errors = []
    timings = []
    for idx, (x, y) in enumerate(val_dataset.data_generator()):
        print(f'{idx}/{val_dataset.epoch_steps}')

        started = time()
        res = model.test_online(x)
        timings.append(time() - started)

        ypr = np.array(y)[:, 0, 1]
        errors.append(abs(ypr - res))

        print(f'YPR: {np.mean(np.array(errors), axis=0)}')
        print(f'TIME: {np.mean(timings)}')
        if idx == val_dataset.epoch_steps:
            break
示例#23
0
class Encoder(ApplicationSession):
    """WAMP session exposing question-encoding RPCs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.codec = Codec()
        self.model = Model('encode')

    def load(self, *args, **kwargs):
        """Load saved weights if present, then compile the model."""
        print('[call] semanticaio.encoder.load')
        try:
            self.model.load()
        except Exception:
            # Best-effort: missing/corrupt weights fall back to a fresh
            # compile (was a bare except, which also swallowed SystemExit).
            print('[error] semanticaio.encoder.load')
        self.model.compile()

    def _encode(self, question):
        """One-hot encode *question* and return its embedding as a list."""
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        coded_question = np.zeros((self.codec.seq_len, self.codec.n_chars), dtype=bool)
        self.codec.encode(question, coded_question)
        return self.model.encode(coded_question).tolist()

    def encode(self, *args, **kwargs):
        """RPC: encode a single 'question' or a list of 'questions'."""
        print('[call] semanticaio.encoder.encode:', kwargs)
        result = {}
        if 'question' in kwargs:
            result['encoded'] = self._encode(kwargs['question'])
        elif 'questions' in kwargs:
            result['encoded'] = [self._encode(q) for q in kwargs['questions']]
        return result

    @coroutine
    def onJoin(self, details):
        """Register RPC endpoints once the session joins the realm."""
        yield from self.register(self.load, 'semanticaio.encoder.load')
        yield from self.register(self.encode, 'semanticaio.encoder.encode')
        print('[encoder started]')
示例#24
0
文件: cron.py 项目: johmats/ergasia
def cronjob(sched_id, cmd_id):
    """Run command *cmd_id* on every active iLED device of schedule
    *sched_id*; log aggregated failures and mark successes active.
    """
    db_model = Model()

    devices = db_model.get_schedule_active_devices(sched_id, 'iled', 'iled')
    if not devices:
        return

    # Resolve the command's display name from the cache file.
    with open(cache_json_path, 'r') as fin:
        json = json_load(fin)
        cmd_name = json['command_names'][str(cmd_id)]

    errors = []
    succeeded = []  # NOTE(review): collected but never read — confirm intent.
    dev_ids_to_update = []
    for dev_id, dev_name, password in devices:
        # call worker.py
        res = handle_request(
            vendor='iled', model='iled', dev_name=dev_name,
            password=password, command=cmd_id
        )

        if res['status'] != 'OK':
            errors.append({
                'dev_name': dev_name,
                'status': res['status'],
                'reason': res['message']
            })
        else:
            dev_ids_to_update.append(dev_id)
            succeeded.append(dev_name)

    # One aggregated error-log entry per schedule run.
    if errors:
        json = json_dumps({'schedule_id' : sched_id, 'vendor' : 'iled', 'model' : 'iled', 'command': cmd_name, 'errors' : errors})
        db_model.insert_log('schedule_error', json, 1)

    if dev_ids_to_update:
        db_model.update_device_status(dev_ids_to_update, cmd_id=cmd_id, dstat_name='active')

    db_model.cleanup()
示例#25
0
文件: __init__.py 项目: mlnd/tcsl
def runTests(X_test, X_train, y_test, y_train):
    """Fit each configured classifier once and gather timing/F1 metrics.

    Returns parallel lists: classifier names, training times, prediction
    times, train F1 scores, and test F1 scores.
    """
    classifiers = []
    train_times = []
    pred_times = []
    f1_trains = []
    f1_tests = []

    for classifier, parameters in CLASSIFIERS:
        fitted = Model(classifier, parameters)
        fitted(X_train, y_train, X_test, y_test)

        classifiers.append(fitted.classifier.__class__.__name__)
        train_times.append(fitted.training_time)
        pred_times.append(fitted.train_prediction_time)
        f1_trains.append(fitted.f1_train)
        f1_tests.append(fitted.f1_test)

    return classifiers, train_times, pred_times, f1_trains, f1_tests
示例#26
0
文件: cron.py 项目: johmats/ergasia
def _cronjob(sched_id):
    """Push the generated schedule to every active NASys UL2011 device of
    schedule *sched_id*; log aggregated failures.
    """
    db_model = Model()

    devices = db_model.get_schedule_active_devices(sched_id, 'nasys', 'ul2011')

    # Build the schedule payload once; all devices receive the same command.
    sched_cmd_data = generate_schedule(sched_id)
    cmd = 'send_custom_command'
    msg = json_dumps(sched_cmd_data)

    errors = []
    succeeded = []  # NOTE(review): collected but never read — confirm intent.
    for _dev_id, dev_name, password in devices:
        res = handle_request(vendor='nasys',
                             model='ul2011',
                             dev_name=dev_name,
                             password=password,
                             command=cmd,
                             parameter=msg)

        if res['status'] != 'OK':
            errors.append({
                'dev_name': dev_name,
                'status': res['status'],
                'reason': res['message']
            })
        else:
            succeeded.append(dev_name)

    # One aggregated error-log entry per schedule run.
    if errors:
        json = json_dumps({
            'schedule_id': sched_id,
            'vendor': 'nasys',
            'model': 'ul2011',
            'errors': errors
        })
        db_model.insert_log('schedule_error', json, 1)

    db_model.cleanup()
示例#27
0
def main(opts):
    """Train the pose model on the AFLW2000 splits and save a checkpoint."""
    model = Model(66, opts.size)

    # Load pretrained weights when provided.
    if opts.pretrain is not None:
        print(f'Initial weights from {opts.pretrain}')
        model.load(opts.pretrain)

    train_list, val_list = split(opts.data)

    # Training loader uses (light) augmentation; validation does not.
    train_dataset = AFLW2000(train_list,
                             augment=True,
                             batch_size=opts.bs,
                             input_size=opts.size)
    val_dataset = AFLW2000(val_list, batch_size=opts.bs, input_size=opts.size)

    # Fit and write the checkpoint under a name encoding the run config.
    chkpt_name = f'model_size{opts.size}_e{opts.epoch}_lr{opts.lr:.01E}.h5'
    model.train(chkpt_name, train_dataset, val_dataset, opts.epoch)
示例#28
0
def main(opts):
    """Run inference over the test loader and save predictions as ytest.npy."""
    # Select device
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Define model and restore weights.
    model = Model().to(device)
    model.load_state_dict(torch.load(opts.weights))
    model.eval()

    # Define dataloader
    test_loader = get_test(opts.input)

    predictions = []
    with torch.no_grad():
        for batch in test_loader:
            batch = batch.to(device)
            output = model(batch).cpu().detach().numpy().squeeze()
            predictions.append(output)

    np.save(os.path.join(opts.input, 'ytest.npy'), np.array(predictions))
示例#29
0
def update_cron():
    """Regenerate the crontab file from schedule data and install it."""
    times = get_sunrise_sunset_times()

    db_model = Model()
    schedule_items = db_model.get_all_schedule_items(times)
    schedule_models = db_model.get_all_schedule_models()
    db_model.cleanup()

    # print(schedule_items)
    # print(schedule_models)

    with open(cron_txt_path, 'w') as fout:
        write_crons_header(fout)

        for sched_id in schedule_items:
            items = schedule_items[sched_id]
            models = schedule_models[sched_id]

            # FIXME: these lines probably belong in a per-model function in drivers/model/funcs.py

            # NASys controllers: one periodic schedule-update cron entry.
            if ('nasys', 'ul2011') in models:
                fout.write(
                    f'{crontimes["nasys_update_schedule"]} /usr/bin/python3 {basepath}/drivers/nasys/ul2011/cron.py {sched_id}\n'
                )

            # iLED controllers: one cron entry per scheduled command time.
            if ('iled', 'iled') in models:
                for time, cmd_id, _cmd_name in items:
                    fout.write(
                        f'{time[3:5]} {time[0:2]} * * * /usr/bin/python3 {basepath}/drivers/iled/iled/cron.py {sched_id} {cmd_id}\n'
                    )
        fout.flush()

    username = getenv('USER')
    # NOTE(review): the trailing '<<EOL' has no heredoc body/terminator —
    # confirm this crontab invocation actually installs the file.
    run([f'crontab -u {username} {cron_txt_path} <<EOL'], shell=True)
示例#30
0
from flask import Flask, send_from_directory, jsonify

from lib.model import Model
from lib.prezi_indexer import PerziIndexer

app = Flask(__name__, static_folder="prezi-app/dist", static_url_path="")
# Build the search model from the prezi JSON dump, indexed by prezi id.
model = Model(
    PerziIndexer.index_json(open("data/prezis.json").read(), index_by='id'))


@app.route('/')
def index():
    """Serve the SPA entry point."""
    return app.send_static_file('index.html')


@app.route('/<path:path>')
def static_proxy(path):
    """Serve any other static asset from the built frontend."""
    return app.send_static_file(path)


@app.route('/prezi/<id>')
def get_prezi_data(id):
    """Return the indexed prezi record for *id* as JSON."""
    return jsonify(model[id])


@app.route('/search/<phrase>')
def search_name(phrase):
    """Search prezis by title and return matches as JSON."""
    return jsonify(model.search(phrase, field_key="title"))


if __name__ == '__main__':
示例#31
0
 def __init__(self):
     """Initialize by delegating to the Model base class."""
     Model.__init__(self)
示例#32
0
 def __init__(self, *args, **kwargs):
     """Set up batch size, stop flag, codec, and a model in 'train' mode."""
     super().__init__(*args, **kwargs)
     self.batch_size = 2
     self.needStop = False
     self.codec = Codec()
     self.model = Model('train')
    def test_parse_should_call_given_parser_strategy(self):
        """write() must delegate to the injected writing strategy."""
        path = 'dummy/path'
        model = Model([], [])
        self._writer.write(path, model)

        self._writing_strategy_mock.write.assert_called_once_with(path, model)
from data import load_data
from lib.model import Model
from util import Logger, train, validation, AdamOptimizer

# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]=str(exp_config['device'])
# torch.cuda.set_device(0)

# data
train_data, val_data = load_data(data_config, exp_config['batch_size'])
eval_length = data_config['eval_length']

# logger

# model
model = Model(**model_config).to(0)

# optimizer
optimizer = AdamOptimizer(params=model.parameters(), lr=exp_config['lr'],
                          grad_clip_value=exp_config['grad_clip_value'],
                          grad_clip_norm=exp_config['grad_clip_norm'])

logger_on = True

if logger_on:
    logger = Logger(exp_config, model_config, data_config)

# train / val loop
for epoch in range(exp_config['n_epochs']):

    print('Epoch:', epoch)
示例#35
0
 def __init__(self, *args, **kwargs):
     """Set up the codec and a model in 'encode' mode."""
     super().__init__(*args, **kwargs)
     self.codec = Codec()
     self.model = Model('encode')