Example #1
File: structure.py Project: lanl-ansi/dwig
    def coupler_filter(self, coupler_set):
        coupler_sites = set([])
        for (i, j) in coupler_set:
            coupler_sites.add(i)
            coupler_sites.add(j)

        filtered_sites = set([])
        for site in self.sites:
            if site.index in coupler_sites:
                filtered_sites.add(site.index)

        filtered_couplers = []
        for i, j in self.couplers:
            if (i.index in filtered_sites and j.index in filtered_sites):
                coupler = (i.index, j.index)
                if coupler in coupler_set:
                    filtered_couplers.append(coupler)

        if len(filtered_couplers) != len(coupler_set):
            print_err(
                'warning: given a coupler set of size {} but found only {} active couplings from this set'
                .format(len(coupler_set), len(filtered_couplers)))
            filtered_sites = set([])
            for (i, j) in filtered_couplers:
                filtered_sites.add(i)
                filtered_sites.add(j)

        return ChimeraQPU(filtered_sites, filtered_couplers, self.cell_size,
                          self.chimera_degree, self.site_range,
                          self.coupler_range, self.chimera_degree_view,
                          self.chip_id, self.endpoint, self.solver_name)
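
Every listing on this page exercises a print_err helper. As a point of reference, here is a minimal sketch of what such a helper typically looks like (an assumption, not the projects' actual code; the real helpers vary, and several examples below pass extra positional context or end/flush keyword arguments):

import sys

def print_err(*args, **kwargs):
    # assumed helper: route diagnostics to stderr so stdout stays clean
    print(*args, file=sys.stderr, **kwargs)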
Example #2
File: structure.py Project: cnxtech/dwig
def _rescale(fields, couplings, offset, site_range, coupler_range):
    assert (site_range.lb + site_range.ub == 0.0)
    assert (coupler_range.lb + coupler_range.ub == 0.0)

    scaling_factor = 1.0
    scale = 1.0

    for field in fields.values():
        if field != 0:
            if field < site_range.lb:
                scaling_factor = min(scaling_factor,
                                     site_range.lb / float(field))
            if field > site_range.ub:
                scaling_factor = min(scaling_factor,
                                     site_range.ub / float(field))

    for coupling in couplings.values():
        if coupling != 0:
            if coupling < coupler_range.lb:
                scaling_factor = min(scaling_factor,
                                     coupler_range.lb / float(coupling))
            if coupling > coupler_range.ub:
                scaling_factor = min(scaling_factor,
                                     coupler_range.ub / float(coupling))

    if scaling_factor < 1.0:
        print_err(
            'info: rescaling field to {} and couplings to {} with scaling factor {}'
            .format(site_range, coupler_range, scaling_factor))
        fields = {k: v * scaling_factor for k, v in fields.items()}
        couplings = {k: v * scaling_factor for k, v in couplings.items()}
        offset = offset * scaling_factor
        scale = 1 / scaling_factor

    return fields, couplings, offset, scale
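
A worked sketch under assumed inputs (Range is a stand-in with lb/ub fields; the asserts above require both ranges to be symmetric). A coupling of -2.0 against a [-1, 1] coupler range forces a scaling factor of 0.5:

from collections import namedtuple

Range = namedtuple('Range', ['lb', 'ub'])  # assumed shape; must satisfy lb + ub == 0

fields = {0: 1.5}
couplings = {(0, 4): -2.0}
fields, couplings, offset, scale = _rescale(
    fields, couplings, 0.0, Range(-2.0, 2.0), Range(-1.0, 1.0))
# scaling factor = -1.0 / -2.0 = 0.5, so the reported scale (its inverse) is 2.0
assert couplings[(0, 4)] == -1.0 and scale == 2.0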
Example #3
def main():
    parser = argparse.ArgumentParser()
    # nargs='?' makes the defaults effective; argparse ignores a default on a
    # required positional argument
    parser.add_argument('edges_filename', nargs='?',
                        default='interim/customer_product_counts.csv')
    parser.add_argument('customers_filename', nargs='?',
                        default='data/publicChallenge.csv')
    parser.add_argument('-o', '--output', default='rwalks.probas')
    parser.add_argument('-l', '--walk-length', default=2, type=int)
    parser.add_argument('-w', '--no-walks', default=20, type=int)
    parser.add_argument('-N', '--top-n', default=20, type=int)
    parser.add_argument('-s', '--seed', default=101, type=int)
    args = parser.parse_args()

    rw = RandomWalks(args.seed)

    # customers, products, G = nx_load_data(edges_filename)
    common.print_err("Loading...")
    rw.load(args.edges_filename)
    common.print_err("Loaded")

    # customers_to_predict = customers

    # with open(customers_to_predict_filename, 'rb') as f:
    #   customers_to_predict = ['c'+cid.strip() for cid in f if cid.strip() != ""]
    customers_to_predict = customer.load_ids(args.customers_filename,
                                             add_prefix='c')

    # walk_length = 2
    # no_of_walks = 20 # 100

    rw.predict(customers_to_predict,
               args.walk_length,
               args.no_walks,
               args.top_n,
               args.output)
Example #4
def handle_request(request):
    print_err("access: " + request.fields[0])
    print_err("params: " + request.fields[2])
    try:
        if request.is_error:
            return handle_err(request)
        else:
            return handle_dat(request)
    except (SecurityViolation, InvalidRequest) as e:
        fields = map(str, e.args)
        return unet_request.Request(*fields, is_error=True)
    except Exception:
        # unexpected failure: log the traceback; falls through and returns None
        traceback.print_exc()
Example #5
    def back_track(findme):
        '''Given a depth [findme] (0 = subsec, 4 = sub3para), cycle backwards
        through the list [alist] and return the text found in brackets at that
        sublevel (e.g. "(iii) A tenant shall..." -> "iii").
        If there are no brackets, return the section itself, e.g. "90.100".'''

        num = 0
        while True:
            if alist[cnt-1-num][0] == findme:
                bracketed = in_bracs(alist[cnt-1-num][1])
                if bracketed is None:
                    return alist[cnt-1-num][1]
                return bracketed
            num += 1
            if cnt-1-num < 0:
                # log before returning; the original logged after the return,
                # which never executed
                print_err(0, f'(Did not find item of depth of {findme} anywhere before line {cnt-1}.)')
                return "NOT FOUND"
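
back_track leans on an in_bracs helper that is not shown here. A plausible sketch, assuming it returns the token inside a leading "(...)" or None when there are no brackets:

import re

def in_bracs(text):
    # hypothetical helper: "(iii) A tenant shall..." -> "iii", else None
    m = re.match(r'\s*\(([^)]+)\)', text)
    return m.group(1) if m else None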
Example #6
def sub_levels(depth, parent, data):
    global subs_list
    if in_bracs(data[1]) == start[depth]:
        try:
            subs_list[depth] = parent.ol(class_='lvl' + str(data[0]))
        except Exception as e:
            print_err(
                e,
                f'Couldn\'t add new depth {depth} for #{data[0]} for: {data[1]} at {data[2]}'
            )
            return False
    try:
        subs_list[depth].li(data[1], title=str(data[2]))
        return True
    except Exception as e:
        print_err(
            e, f'Couldn\'t add {data[0]} with depth {depth} for: {data[2]}')
        return False
Example #7
def handle_dat(request):
    allowed_directory = "global/"
    file = allowed_directory + request.fields[0]
    args = request.fields[2:]
    if not in_directory(file, allowed_directory):
        raise SecurityViolation("Forbidden", request.fields[0])
    if is_executable(file):
        p = subprocess.run([file] + args,
                           input=request.fields[1].encode("utf-8")
                           if len(request.fields) >= 2 else b'',
                           capture_output=True)
        if len(p.stderr) > 0:
            print_err("stderr: " + p.stderr.decode("utf-8"))
        return unet_request.Request(p.stdout)
    elif is_readable(file):
        with open(file, "rb") as f:
            return unet_request.Request(f.read())
    else:
        raise InvalidRequest("Not Found", file[len(allowed_directory) - 1:])
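
The safety of handle_dat rests entirely on in_directory, which is not shown. A hedged sketch of such a containment check (an assumption, not the project's actual helper): resolve symlinks and ".." segments first, then require the allowed directory as a path prefix.

import os

def in_directory(path, directory):
    # hypothetical check: a traversal like "global/../etc/passwd" resolves
    # outside the allowed directory and is rejected
    real_path = os.path.realpath(path)
    real_dir = os.path.realpath(directory)
    return os.path.commonpath([real_path, real_dir]) == real_dir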
Example #8
def fix_subs(cur, lst):
    if cur == 'eL':  # for capital L, scroll up until you find an 'h/H'
        for back in range(count):
            if in_bracs(lst[(count - back)][1]) == 'h':
                return 2
            if in_bracs(lst[(count - back)][1]) == 'H':
                return 3
    elif cur == 'romanish':  # for ambiguous roman characters
        # TODO see todos below combine these a little better? Make separate function.
        back_one = lst[(count - 1)]  # first look back one paragraph, that may answer it
        if back_one[0] == 4 or back_one[0] == 5:  # if (i) or (I)...
            return 4
        elif back_one[0] == 3:  # if a subpara (A), did last para (a) match previous letter in alphabet?
            for back in range(count):
                if lst[back][0] == 2:
                    if in_bracs(lst[(count - back)][1]) == prior_para[in_bracs(lst[count][1])]:
                        return 2  # e.g., (i) is just a para after (h)
                    else:
                        return 4  # e.g., (i) is start of roman numeral list i, ii, iii...
        else:
            return 2
    elif cur == 'ROMANISH':  # for ambiguous ROMAN characters, look back one paragraph.
        back_one = lst[(count - 1)][0]
        if back_one == 5:  # if it's a sub3 para (I), then this too is sub3para (II).
            # TODO not necessarily. Should still check. Could be (g), **(h),** (A), (B), (i), (ii), **(i),** (j)...
            return 5
        elif back_one == 4:  # if a sub2para (i), did last subpara (A) match previous letter?
            for back in range(count, 1, -1):
                # TODO could probably use backward count above and for entire piece where we need to look back.
                # todo see back_track function elsewhere & maybe incorporate it
                if lst[back][0] == 3:
                    if in_bracs(lst[back][1]) == prior_para[in_bracs(lst[count][1]).lower()]:
                        return 3
                    else:
                        return 5
        else:
            return 3
    elif cur == 'dunno':
        print_err('unclassified line ("dunno") remains', f'Line #{count} for {cur}')
        return cur
    else:
        return cur
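
fix_subs consults a prior_para table that is not shown. A sketch of the assumed mapping, where each paragraph letter points to the letter before it (this is what makes the "(i) is just a para after (h)" test above work):

import string

# assumed lookup: prior_para['i'] == 'h', prior_para['b'] == 'a', ...
prior_para = {c: string.ascii_lowercase[i - 1]
              for i, c in enumerate(string.ascii_lowercase)
              if i > 0}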
Example #9
def check_index(il, lst):
    global count
    if il[0] == 'subtitle':   # reclassify subtitles that appear among index terms as part of the index.
        try:
            temp = 0
            while lst[count + temp][0] == 'sub2title' or lst[count + temp][0] == 'subtitle' or \
                    lst[count + temp][0] == 'dunno' or lst[count + temp][0] == 'index':
                if lst[count + temp][0] == 'index':
                    il[0] = 'index_sub'
                    break
                temp += 1
        except Exception as e:
            print_err(e, f'line #{count} for: {il}')
    if il[0] == 'sub2title' or il[0] == 'dunno':   # likewise reclassify sub2titles (and unknowns) that appear among index terms.
        try:
            temp = 0
            while lst[count + temp][0] == 'sub2title' or lst[count + temp][0] == 'subtitle' or \
                    lst[count + temp][0] == 'dunno' or lst[count + temp][0] == 'index':
                if lst[count + temp][0] == 'index':
                    il[0] = 'index2sub'
                temp += 1
        except Exception as e:
            print_err(e, f'sub2title -> index reclass in line# {count} for: {il}')
Example #10
File: dwig.py Project: cnxtech/dwig
def load_config(args):
    config_file_path = args.config_file

    if os.path.isfile(config_file_path):
        with open(config_file_path, 'r') as config_file:
            try:
                config_data = json.load(config_file)
                for key, value in config_data.items():
                    if isinstance(value, dict):
                        print_err(
                            'invalid value for configuration key "%s", only single values are allowed'
                            % key)
                        quit()
                    if not hasattr(args, key) or getattr(args, key) is None:
                        # Python 2: coerce unicode config values to plain str
                        if isinstance(value, unicode):
                            value = value.encode('ascii', 'ignore')
                        if isinstance(value, list):
                            new_list = []
                            for item in value:
                                if isinstance(item, unicode):
                                    item = item.encode('ascii', 'ignore')
                                new_list.append(item)
                            value = new_list
                        setattr(args, key, value)
                    else:
                        print_err(
                            'skipping the configuration key "%s", it already has a value of %s'
                            % (key, str(getattr(args, key))))
            except ValueError:
                print_err(
                    'the config file does not appear to be a valid json document: %s'
                    % config_file_path)
                quit()
    else:
        if config_file_path != DEFAULT_CONFIG_FILE:
            print_err('unable to open configuration file: %s' %
                      config_file_path)
            quit()

    return args
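
A usage sketch under assumed flag names (only config_file is read directly by load_config; DEFAULT_CONFIG_FILE comes from the surrounding module). Values from a flat JSON document fill in whichever options the command line left unset:

import argparse

# hypothetical config file contents: {"chimera_degree": 4, "seed": 42}
parser = argparse.ArgumentParser()
parser.add_argument('-cf', '--config-file', default=DEFAULT_CONFIG_FILE)
parser.add_argument('-cd', '--chimera-degree', type=int)
parser.add_argument('-rs', '--seed', type=int)
args = load_config(parser.parse_args([]))
# args.chimera_degree and args.seed are now 4 and 42 unless set on the CLI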
Example #11
def get_split_i(splitter, customer_ids, min_x, min_y):
    for customer_id, customer_records in customer.get_mult_records_i(customer_ids):
        if len(customer.get_unique_times(customer_records)) < 2:
            common.print_err("Skipped:", customer_id)
            continue
        x_orders_set, y_row = splitter.split(customer_records)
        if len(x_orders_set) < min_x:
            common.print_err("Skipped due to x too small: {} ({})".format(customer_id, len(x_orders_set)))
            continue
        if len(y_row) < min_y:
            common.print_err("Skipped due to y too small: {} ({})".format(customer_id, len(y_row)))
            continue
            
        yield customer_id, x_orders_set, y_row
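
A sketch of how the generator might be consumed (splitter, customer_ids, and the customer module are assumed from the surrounding project):

# hypothetical driver: keep only customers with a usable history split
pairs = [(customer_id, x_orders_set, y_row)
         for customer_id, x_orders_set, y_row
         in get_split_i(splitter, customer_ids, min_x=2, min_y=1)]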
Example #12
def cur_children(line, my_div):
    global err
    global slug

    # todo carve out form piece below into new section
    global form
    if line[0] == 'form_start':  # trying to create new form box based on parentage
        try:
            # pulls in parent of form type from the prior line
            # todo: although why not just do that now?
            form_id = line[2]
            if form_id == 0:
                form = slug.div(class_='form-box')
            elif str(form_id).isnumeric():
                if 1 <= form_id <= 5:
                    form = subs_list[form_id - 1].div(class_='form-box')
            else:
                form = my_div.div(line[1], class_='form-box')
                print_err('Form not created within section text',
                          f'at {line}')
        except Exception as e:
            print_err(e, f'form failed at {line}')
        return True
    # // end form carve out

    elif line[0] == 0:
        slug = my_div.p(line[1], class_='flush')
        return True
    elif line[0] == 'form':
        try:
            form.p(line[1], class_='form-box')
        except Exception as e:
            print_err(e, f'attempted to add form from {line} to form box')
        return True
    elif str(line[0]).isnumeric():
        if line[0] == 1:
            return sub_levels(0, my_div, line)
        if 2 <= line[0] <= 5:
            return sub_levels(line[0] - 1, subs_list[line[0] - 2], line)
Example #13
def build_case(args):
    if args.seed is not None:
        print_err('setting random seed to: {}'.format(args.seed))
        random.seed(args.seed)

    #print_err(args.chimera_edge_set)

    qpu = get_qpu(args.profile, args.ignore_connection,
                  args.hardware_chimera_degree)
    #print_err(qpu)

    if args.chimera_degree is not None:
        print_err('filtering QPU to chimera of degree {}'.format(
            args.chimera_degree))
        qpu = qpu.chimera_degree_filter(args.chimera_degree)

    if args.chimera_cell_limit is not None:
        print_err('filtering QPU to the first {} chimera cells'.format(
            args.chimera_cell_limit))
        qpu = qpu.cell_filter(args.chimera_cell_limit)

    if args.chimera_cell_box is not None:
        chimera_cell_1 = tuple(args.chimera_cell_box[0:2])
        chimera_cell_2 = tuple(args.chimera_cell_box[2:4])
        print_err('filtering QPU to the chimera cell box {} by {}'.format(
            chimera_cell_1, chimera_cell_2))
        qpu = qpu.chimera_cell_box_filter(chimera_cell_1, chimera_cell_2)

    if args.spin_set is not None:
        print_err('filtering QPU to the spin set {}'.format(args.spin_set))
        qpu = qpu.spin_filter(args.spin_set)

    if args.coupler_set is not None:
        print_err('filtering QPU to the coupler set {}'.format(
            args.coupler_set))
        qpu = qpu.coupler_filter(args.coupler_set)

    if args.generator == 'const':
        qpu_config = generator.generate_disordered(
            qpu, [args.coupling], [1.0], [args.field], [1.0],
            args.random_gauge_transformation)
    elif args.generator == 'ran':
        qpu_config = generator.generate_ran(qpu, args.probability, args.steps,
                                            args.field, args.scale,
                                            args.simple_ground_state)
    elif args.generator == 'gd':
        qpu_config = generator.generate_disordered(
            qpu, args.coupling_values, args.coupling_probabilities,
            args.field_values, args.field_probabilities,
            args.random_gauge_transformation)
    elif args.generator == 'cbfm':
        qpu_config = generator.generate_disordered(
            qpu, [args.j1_val, args.j2_val], [args.j1_pr, args.j2_pr],
            [args.h1_val, args.h2_val], [args.h1_pr, args.h2_pr],
            args.random_gauge_transformation)
    elif args.generator == 'fl':
        qpu_config = generator.generate_fl(
            qpu, args.steps, args.alpha, args.multicell,
            args.cluster_chimera_cells, args.simple_ground_state,
            args.min_loop_length, args.loop_reject_limit,
            args.loop_sample_limit)
    elif args.generator == 'wscn':
        if args.chimera_cell_limit is not None:
            print_err(
                'weak-strong cluster networks cannot be constructed with a cell limit.'
            )
            quit()

        if qpu.chimera_degree_view < 6:
            print_err(
                'weak-strong cluster networks require a qpu with chimera degree of at least 6, the given degree is {}.'
                .format(qpu.chimera_degree_view))
            quit()

        effective_chimera_degree = 3 * (qpu.chimera_degree_view // 3)
        if effective_chimera_degree != qpu.chimera_degree_view:
            print_err(
                'the weak-strong cluster network will occupy a space of chimera degree {}.'
                .format(effective_chimera_degree))
        qpu = qpu.chimera_degree_filter(effective_chimera_degree)

        qpu_config = generator.generate_wscn(qpu, args.weak_field,
                                             args.strong_field)
    elif args.generator == 'fclg':
        qpu_config = generator.generate_fclg(qpu, args.steps, args.alpha,
                                             args.gadget_fraction,
                                             args.simple_ground_state,
                                             args.min_loop_length,
                                             args.loop_reject_limit,
                                             args.loop_sample_limit)
    else:
        assert (False)  # CLI failed

    if args.include_zeros:
        config = qpu_config
        if isinstance(qpu_config, QPUAssignment):
            config = qpu_config.qpu_config
        for site in config.qpu.sites:
            if site not in config.fields:
                config.fields[site] = 0.0
        for coupler in config.qpu.couplers:
            if coupler not in config.couplings:
                config.couplings[coupler] = 0.0

    #print_err(qpu_config)
    if args.omit_solution:
        if isinstance(qpu_config, QPUAssignment):
            qpu_config = qpu_config.qpu_config

    data = qpu_config.build_dict(args.include_zeros)

    data['description'] = (
        'This is a randomly generated B-QP built by D-WIG '
        '(https://github.com/lanl-ansi/dwig) using the {} algorithm.'
        .format(args.generator))
    if args.seed is not None:
        data['description'] += '  A random number seed of {} was used.'.format(
            args.seed)

    data['metadata'] = build_metadata(args, qpu)

    return data
Example #14
def get_qpu(profile, ignore_connection, hardware_chimera_degree):
    chip_id = None
    endpoint = None
    solver_name = None
    cell_size = 8

    global _qpu_remote

    if not ignore_connection:
        if _qpu_remote is None:
            try:
                # expanduser is robust when HOME is unset, unlike os.getenv
                config_path = os.path.expanduser("~/dwave.conf")
                with Client.from_config(config_file=config_path,
                                        profile=profile) as client:
                    endpoint = client.endpoint
                    solver = client.get_solver()
                    solver_name = solver.name
                    couplers = solver.undirected_edges
                    sites = solver.nodes

                    solver_chimera_degree = int(
                        math.ceil(math.sqrt(len(sites) / cell_size)))
                    if hardware_chimera_degree != solver_chimera_degree:
                        print_err(
                            'Warning: the hardware chimera degree was specified as {}, while the solver {} has a degree of {}'
                            .format(hardware_chimera_degree, solver_name,
                                    solver_chimera_degree))
                        hardware_chimera_degree = solver_chimera_degree

                    site_range = Range(*solver.properties['h_range'])
                    coupler_range = Range(*solver.properties['j_range'])
                    chip_id = solver.properties['chip_id']

            #TODO remove try/except logic, if there is a better way to check the connection
            except Exception as e:
                print_err(
                    'QPU connection details not found or there was a connection error'
                )
                print_err('  ' + str(e))
                print_err(
                    'assuming full yield square chimera of degree {}'.format(
                        hardware_chimera_degree))
                ignore_connection = True
        else:
            print_err('info: using cached QPU details')
            return _qpu_remote

    if ignore_connection:
        site_range = Range(-2.0, 2.0)
        coupler_range = Range(-1.0, 1.0)

        # cell_size // 2 == 4 here assumes a 4x2 chimera unit cell
        graph = dwave_networkx.chimera_graph(hardware_chimera_degree,
                                             hardware_chimera_degree,
                                             cell_size // 2)
        edges = graph.edges()
        #arcs = get_chimera_adjacency(hardware_chimera_degree, hardware_chimera_degree, cell_size//2)
        #print(arcs)

        # turn arcs into couplers
        # this step is necessary to be consistent with the solver.properties['couplers'] data
        couplers = []
        for i, j in edges:
            assert (i != j)
            if i < j:
                couplers.append((i, j))
            else:
                couplers.append((j, i))
        couplers = set(couplers)

        sites = set([coupler[0] for coupler in couplers] +
                    [coupler[1] for coupler in couplers])

        # sanity check on coupler consistency across both branches
        for i, j in couplers:
            assert (i < j)

    if _qpu_remote is None:
        _qpu_remote = ChimeraQPU(sites,
                                 couplers,
                                 cell_size,
                                 hardware_chimera_degree,
                                 site_range,
                                 coupler_range,
                                 chip_id=chip_id,
                                 endpoint=endpoint,
                                 solver_name=solver_name)
    return _qpu_remote
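
A quick check of the degree heuristic above, with a chip size assumed for illustration: a 2048-qubit chip with 8-qubit cells gives 256 cells, so the inferred chimera degree is 16.

import math

sites, cell_size = 2048, 8
assert int(math.ceil(math.sqrt(sites / cell_size))) == 16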
Example #15
    def learn(self,
              x,
              validation_split=0.0,
              validation_data=None,
              log_file_prefix=None,
              per_epoch_callback_funs=[],
              callbacks=[],
              verbose='print_epochs',  # one of: 'none', 'print_epochs', 'progress_bars'
              deadline=None,
              max_duration=None):
        optim_fun = getattr(optim, self.params.optimizer)
        optimizer = optim_fun(self.model.parameters(),
                              **self.params.optimizer_params)

        # optional validation split
        if validation_split > 0:
            assert validation_data is None
            perm = np.random.permutation(x.shape[0])
            split_idx = round(validation_split * x.shape[0])
            val, train = perm[:split_idx], perm[split_idx:]
            validation_data, x = x[val, :], x[train, :]

        validation = (validation_data is not None)

        # learn data normalization
        self.normalizer.learn(x)
        x = self.normalizer.normalize(x)
        if validation:
            validation_data = self.normalizer.normalize(validation_data)

        # optionally add callbacks
        keras_callbacks = []
        # 'built-in' callbacks
        '''if log_file_prefix:
      #keras_callbacks.append(keras.callbacks.CSVLogger(log_file_prefix + ".log"))
      if self.params.log_weights:
        keras_callbacks.append(WeightLogger(self.vae_loss, log_file_prefix))
      if self.params.log_loss:
        keras_callbacks.append(LossLogger(log_file_prefix,
                                          per_patch=self.log_loss_per_patch))
      if self.params.log_loss and validation:
        keras_callbacks.append(LossLogger(log_file_prefix, loss='val_loss'))
    # externally defined keras callback objects
    for callback in callbacks:
      keras_callbacks.extend(callbacks)
    # externally defined callbacks functions
    for callback in per_epoch_callback_funs:
      class CB(keras.callbacks.Callback):
        def on_train_begin(self, logs={}):
          callback()
        def on_epoch_end(self, epoch, logs={}):
          callback()
      keras_callbacks.append(CB())
    
    if self.params.early_stopping:
      if self.params.early_stopping == True:
        monitor = ('val_loss' if validation else 'loss')
      else:
        monitor = self.params.early_stopping
      keras_callbacks.append(keras.callbacks.EarlyStopping(
          monitor=monitor,
          patience=self.params.early_stopping_patience))
    
    if self.reduce_lr_on_plateau:
      if self.reduce_lr_on_plateau == True:
        monitor = ('val_loss' if validation else 'loss')
      else:
        monitor = self.reduce_lr_on_plateau
      keras_callbacks.append(keras.callbacks.ReduceLROnPlateau(
          monitor=monitor,
          factor=self.reduce_lr_factor,
          patience=self.params.reduce_lr_patience))'''
        '''if verbose == 'none':
      verbose = 0
    elif verbose == 'print_epochs':
      verbose = 2
    elif verbose == 'progress_bars':
      verbose = 1
    else:
      assert False # invalid verbosity'''

        loss_fun = vae_loss
        if self.params.prediction_log_var_min is not None:
            # TODO: Now reqularization strength depends on the type (size) of
            # x_log_var. Should probably unify?
            x_log_var_regularizer = lambda xlv: (torch.sum(
                F.relu(self.params.prediction_log_var_min - xlv)))
            loss_fun = lambda x, x_mean, x_log_var, z_mean, z_log_var: (
                vae_loss(x, x_mean, x_log_var, z_mean, z_log_var
                         ) + x_log_var_regularizer(x_log_var))

        if self.cuda:
            import torch.backends.cudnn as cudnn
            #cudnn.benchmark = True

        if max_duration is not None:
            new_dl = datetime.datetime.now() + max_duration
            if deadline is None or new_dl < deadline:
                deadline = new_dl

        #train
        progress_interval = 100
        n_samples = x.shape[0]
        n_patches = n_samples // self.params.batch_size
        best_es_target_loss = float('inf')
        best_state = self.model.state_dict()
        epochs_no_improvement = 0
        #print(self.model)
        start_time = time.perf_counter()
        for epoch in range(self.params.n_epochs):
            #print(self.model.state_dict())
            # train
            epoch_start_time = time.perf_counter()
            self.model.train()
            train_loss = 0
            perm = np.random.permutation(x.shape[0])
            batches = [
                perm[i:i + self.params.batch_size]
                for i in range(0, len(perm), self.params.batch_size)
            ]
            for batch_idx, batch_rows in enumerate(batches):
                batch_data = torch.from_numpy(x[batch_rows, :]).float()
                batch_data = Variable(batch_data)
                if self.cuda:
                    batch_data = batch_data.cuda()
                optimizer.zero_grad()
                recon_mean, recon_log_var, z_mean, z_log_var = self.model(
                    batch_data)
                loss = loss_fun(batch_data, recon_mean, recon_log_var, z_mean,
                                z_log_var)
                loss.backward()
                train_loss += loss.data[0]
                optimizer.step()
                if (batch_idx + 1) % progress_interval == 0:
                    #print(loss.data[0])
                    print_err(
                        'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                        format(epoch, batch_idx * self.params.batch_size,
                               n_samples, 100. * batch_idx / len(batches),
                               loss.data[0] / len(batch_rows)),
                        flush=True)

            train_loss /= n_samples
            print_err("Epoch: %4d  Avg. loss: %.3f" % (epoch, train_loss),
                      end='',
                      flush=True)

            # validation
            if validation:
                self.model.eval()
                val_loss = 0
                log_prob = 0
                kl_dist = 0
                perm = range(validation_data.shape[0])
                batches = [
                    perm[i:i + self.params.batch_size]
                    for i in range(0, len(perm), self.params.batch_size)
                ]
                for batch_idx, batch_rows in enumerate(batches):
                    batch_data = torch.from_numpy(
                        validation_data[batch_rows, :]).float()
                    batch_data = Variable(batch_data, volatile=True)
                    if self.cuda:
                        batch_data = batch_data.cuda()
                    recon_mean, recon_log_var, z_mean, z_log_var = self.model(
                        batch_data)
                    loss = loss_fun(batch_data, recon_mean, recon_log_var,
                                    z_mean, z_log_var)
                    val_loss += loss.data[0]
                    log_prob += torch.sum(
                        gaussian_log_prob(batch_data, recon_mean,
                                          recon_log_var)).data[0]
                    kl_dist += torch.sum(
                        std_gaussian_kl_dist(z_mean, z_log_var)).data[0]

                val_loss /= validation_data.shape[0]
                log_prob /= validation_data.shape[0]
                kl_dist /= validation_data.shape[0]
                print_err("  Val. loss: %.3f (re: %.3f, kl: %.3f)" %
                          (val_loss, log_prob, kl_dist),
                          end='',
                          flush=True)

            epoch_elapsed = time.perf_counter() - epoch_start_time
            elapsed_str = str(datetime.timedelta(seconds=epoch_elapsed))
            print_err("  Time: %s" % (elapsed_str), flush=True)

            for cb in per_epoch_callback_funs:
                cb()

            # early stopping (loss not improving)
            if self.params.early_stopping:
                if self.params.early_stopping_target == 'auto':
                    if validation:
                        es_target_loss = val_loss
                    else:
                        # without validation data, track the epoch's training loss
                        es_target_loss = train_loss
                elif self.params.early_stopping_target == 'val_loss':
                    es_target_loss = val_loss
                elif self.params.early_stopping_target == 'loss':
                    es_target_loss = train_loss
                else:
                    assert False, "invalid early_stopping_target"

                if es_target_loss < best_es_target_loss:
                    best_es_target_loss = es_target_loss
                    best_state = self.model.state_dict()
                    epochs_no_improvement = 0
                else:
                    epochs_no_improvement += 1
                    if epochs_no_improvement > self.params.early_stopping_patience:
                        self.model.load_state_dict(best_state)
                        print("Early stopping as no improvement in %d epochs" %
                              (self.params.early_stopping_patience))
                        break

            # time based stopping (deadline may legitimately be None)
            if deadline is not None and datetime.datetime.now() >= deadline:
                print_err('Epoch %d: Time based stopping' % (epoch))
                break

        #print(self.model.state_dict())
Example #16
    def build_models(self):
        class VAEModel(nn.Module):
            def __init__(self, params):
                super(VAEModel, self).__init__()
                self.params = params

                # encoding layers
                self.enc_layers = nn.ModuleList()
                enc_dims = list(self.params.enc_dims)
                enc_activations = auto_expand(self.params.enc_activations)
                if self.params.input_dropout:
                    self.enc_layers.append(
                        nn.Dropout(self.params.input_dropout))
                last_dim = self.params.input_dim
                for i in range(self.params.n_enc):
                    #add_bias_terms = not self.params.batch_normalization
                    add_bias_terms = True
                    self.enc_layers.append(
                        nn.Linear(last_dim, enc_dims[i], bias=add_bias_terms))
                    #'enc_hidden_%d'%(i+1)
                    if i == 0:
                        self.first_dense_enc_layer = self.enc_layers[-1]
                    #if self.params.batch_normalization:
                    #  self.enc_layers.append(BatchNormalization(mode=2))
                    self.enc_layers.append(
                        create_activation(enc_activations[i]))
                    if self.params.enc_dropout:
                        self.enc_layers.append(
                            nn.Dropout(
                                auto_expand(self.params.enc_dropout)[i]))
                    last_dim = enc_dims[i]

                # latent layers
                self.enc_z_mean = nn.Linear(last_dim, self.params.latent_dim)
                self.enc_z_log_var = nn.Linear(last_dim,
                                               self.params.latent_dim)
                #self.enc_z_draw = Gaussian()

                # decoding layers
                self.dec_layers = nn.ModuleList()
                dec_dims = list(self.params.dec_dims)
                dec_activations = auto_expand(self.params.dec_activations)
                if self.params.latent_dropout:
                    self.dec_layers.append(
                        nn.Dropout(self.params.latent_dropout))
                last_dim = self.params.latent_dim
                for i in range(self.params.n_dec):
                    #add_bias_terms = not (self.params.batch_normalization and i != self.params.n_dec)
                    add_bias_terms = True
                    self.dec_layers.append(
                        nn.Linear(last_dim, dec_dims[i], bias=add_bias_terms))
                    if i == self.params.n_dec - 1:
                        self.last_dense_dec_layer = self.dec_layers[-1]
                    #if self.params.batch_normalization and i != self.params.n_dec:
                    #  self.dec_layers.append(BatchNormalization(mode=2))
                    self.dec_layers.append(
                        create_activation(dec_activations[i]))
                    if self.params.dec_dropout:
                        self.dec_layers.append(
                            nn.Dropout(
                                auto_expand(self.params.dec_dropout)[i]))
                    last_dim = dec_dims[i]

                # prediction layers
                self.dec_x_mean = nn.Linear(last_dim, self.params.input_dim)
                self.dec_x_mean_act = None
                if self.params.prediction_mean_activation:
                    self.dec_x_mean_act = create_activation(
                        self.params.prediction_mean_activation)
                #pvname = 'pred_log_var'
                if self.params.prediction_var in [
                        'persample_independent', 'pi'
                ]:
                    self.dec_x_log_var = nn.Linear(last_dim,
                                                   self.params.input_dim)
                    self.dec_x_log_var.weight.data.zero_()
                elif self.params.prediction_var in ['persample_same', 'ps']:
                    self.dec_x_log_var = nn.Linear(last_dim, 1)
                    self.dec_x_log_var.weight.data.zero_()
                elif self.params.prediction_var in [
                        'global_independent', 'gi'
                ]:
                    #self.dec_x_log_var = Const(shape=(self.params.input_dim,), trainable=True, name=pvname)
                    self.dec_x_log_var = nn.Parameter(
                        torch.Tensor(self.params.input_dim))
                    self.dec_x_log_var.data.zero_()
                elif self.params.prediction_var in ['global_same', 'gs']:
                    self.dec_x_log_var = nn.Parameter(torch.Tensor(1))
                    self.dec_x_log_var.data.zero_()
                else:
                    self.dec_x_log_var = Variable(torch.from_numpy(
                        self.params.prediction_var).float(),
                                                  requires_grad=False)
                    #self.dec_x_log_var = torch.from_numpy(self.params.prediction_var).float()
                #self.dec_x_draw = Gaussian(name='pred_draw')
                self.dec_x_log_var_act = None
                if self.params.prediction_log_var_activation:
                    self.dec_x_log_var_act = create_activation(
                        self.params.prediction_log_var_activation)

                # normalization layers
                #if self.params.normalize_input_scale is not None:
                #  scale = K.variable(self.params.normalize_input_scale)
                #  self.enc_layers.insert(0, Lambda(lambda x:
                #       x / scale))
                #  self.dec_layers.append(Lambda(lambda x:
                #       x * scale))
                #if self.params.normalize_input_mean is not None:
                #  mean = K.variable(self.params.normalize_input_mean)
                #  self.enc_layers.insert(0, Lambda(lambda x:
                #       x - mean))
                #  self.dec_layers.append(Lambda(lambda x:
                #       x + mean))
                self.cuda_ = False

            def cuda(self):
                self.cuda_ = True
                super(VAEModel, self).cuda()

            def cpu(self):
                self.cuda_ = False
                super(VAEModel, self).cpu()

            def encode(self, x):
                encoded = x
                for layer in self.enc_layers:
                    encoded = layer(encoded)
                encoded_z_mean = self.enc_z_mean(encoded)
                encoded_z_log_var = self.enc_z_log_var(encoded)
                return encoded_z_mean, encoded_z_log_var

            def decode(self, z):
                decoded = z
                for layer in self.dec_layers:
                    decoded = layer(decoded)
                decoded_x_mean = self.dec_x_mean(decoded)
                if self.dec_x_mean_act:
                    decoded_x_mean = self.dec_x_mean_act(decoded_x_mean)
                if isinstance(self.dec_x_log_var, nn.Module):
                    decoded_x_log_var = self.dec_x_log_var(decoded)
                else:
                    decoded_x_log_var = self.dec_x_log_var
                if self.dec_x_log_var_act:
                    decoded_x_log_var = self.dec_x_log_var_act(
                        decoded_x_log_var)
                return decoded_x_mean, decoded_x_log_var

            def forward(self, x):
                #encoded_z_mean, encoded_z_log_var = self.encode(x.view(-1, self.params.input_dim))
                encoded_z_mean, encoded_z_log_var = self.encode(x)
                z = rand_normal_reparametrised(encoded_z_mean,
                                               encoded_z_log_var)
                return self.decode(z) + (encoded_z_mean, encoded_z_log_var)

        self.model = VAEModel(self.params)

        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            print_err("GPU available -> using CUDA", flush=True)
            self.cuda = True
            self.model.cuda()
        else:
            self.cuda = False
            print_err("no GPU available -> using CPU", flush=True)
Example #17
def build_sections(sections_list, mn):
    global sec_div
    global cur_div
    global form
    global err
    global subs_list
    ll, sec_div, cur_div, form, err = None, None, None, None, None
    counter = 0

    for line in sections_list[1:]:
        counter += 1
        if line[0] in ('index', 'index_sub', 'index2sub'):
            pass
        elif line[0] == 'subtitle':
            mn.h3(line[1], class_='subtitle')
        elif line[0] == 'sub2title':
            mn.h4(line[1], class_='sub2title')
        elif line[0] == 'or_sec':
            sec_div = mn.div(id=line[1], class_='section')
            ll = sec_div.p(line[1], class_='or_sec')
            err = None
            cur_div = sec_div
            reboot_cur()
        elif line[0] == 'temp_sec':
            sec_div = mn.div(class_='section temporary')
            ll = sec_div.p(line[1], class_='or_sec')
            cur_div = sec_div
            reboot_cur()
        elif line[0] == 'da_sec':
            alt_div = cur_div.div(class_='alternate')
            cur_div = alt_div
            cur_div.p(line[1], class_='or_sec')
            reboot_cur()
        elif line[0] == 'leadline':
            ll.span(' ' + line[1], class_='leadline')
        elif line[0] == 'note_next':
            mn.p(line[1], class_='note')
        else:
            if sec_children(line, sec_div):
                pass
            elif cur_children(line, cur_div):
                pass
            else:
                print_err('Generated HTML for unclassified line',
                          f'For {line}')
                if err is None:
                    try:
                        err = cur_div.div(class_='unknown')
                    except Exception as e:
                        print_err(e,
                                  f'creating new error div failed for {line}')
                        err = mn.div(class_='unknown')
                    err.p("** WARNING ** ",
                          style='font-weight:bold;text-align:center')
                    err.p(
                        "Lines below are part of this section, but may not have parsed correctly.",
                        style='font-weight:bold;text-align:center')
                    ptemp = err.p(
                        "View original from the ",
                        style=
                        'font-weight:bold;text-align:center;margin-bottom:20px'
                    )
                    ptemp.a(
                        'Oregon Legislature Website',
                        href=
                        'https://www.oregonlegislature.gov/bills_laws/Pages/ORS.aspx'
                    )
                    ptemp += '.'
                try:
                    err.p(line[1])
                except Exception as e:
                    print_err(
                        e,
                        f' adding to existing err message failed for {line}')
Example #18
File: dwig.py Project: cnxtech/dwig
def get_qpu(url, token, proxy, solver_name, hardware_chimera_degree):
    chip_id = None
    cell_size = 8

    if url is not None and token is not None and solver_name is not None:
        print_err(
            'QPU connection details found, accessing "{}" at "{}"'.format(
                solver_name, url))
        if proxy is None:
            remote_connection = RemoteConnection(url, token)
        else:
            remote_connection = RemoteConnection(url, token, proxy)

        solver = remote_connection.get_solver(solver_name)

        couplers = solver.properties['couplers']

        couplers = set([tuple(coupler) for coupler in couplers])

        sites = solver.properties['qubits']

        solver_chimera_degree = int(
            math.ceil(math.sqrt(len(sites) / cell_size)))
        if hardware_chimera_degree != solver_chimera_degree:
            print_err(
                'Warning: the hardware chimera degree was specified as {}, while the solver {} has a degree of {}'
                .format(hardware_chimera_degree, solver_name,
                        solver_chimera_degree))
            hardware_chimera_degree = solver_chimera_degree

        site_range = Range(*solver.properties['h_range'])
        coupler_range = Range(*solver.properties['j_range'])
        chip_id = solver.properties['chip_id']

    else:
        print_err(
            'QPU connection details not found, assuming full yield square chimera of degree {}'
            .format(hardware_chimera_degree))

        site_range = Range(-2.0, 2.0)
        coupler_range = Range(-1.0, 1.0)

        # cell_size // 2 == 4 here assumes a 4x2 chimera unit cell
        # (floor division keeps the integer semantics of the original Python 2 code)
        arcs = get_chimera_adjacency(hardware_chimera_degree,
                                     hardware_chimera_degree, cell_size // 2)

        # turn arcs into couplers
        # this step is necessary to be consistent with the solver.properties['couplers'] data
        couplers = []
        for i, j in arcs:
            assert (i != j)
            if i < j:
                couplers.append((i, j))
            else:
                couplers.append((j, i))
        couplers = set(couplers)

        sites = set([coupler[0] for coupler in couplers] +
                    [coupler[1] for coupler in couplers])

    # sanity check on coupler consistency across both branches
    for i, j in couplers:
        assert (i < j)

    return ChimeraQPU(sites,
                      couplers,
                      cell_size,
                      hardware_chimera_degree,
                      site_range,
                      coupler_range,
                      chip_id=chip_id)