Example No. 1
    def test_pair_wise_loss_predictions(self, X, label, gc, dc):
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('label', label)
        new_label = np.array([label[1], label[0]])
        new_x = np.array([X[1], X[0]])
        workspace.FeedBlob('new_x', new_x)
        workspace.FeedBlob('new_label', new_label)
        net = core.Net('net')
        net.PairWiseLoss(['X', 'label'], ['output'])
        net.PairWiseLoss(['new_x', 'new_label'], ['new_output'])
        plan = core.Plan('predict_data')
        plan.AddStep(core.execution_step('predict_data',
                                         [net], num_iter=1))
        workspace.RunPlan(plan)
        output = workspace.FetchBlob('output')
        new_output = workspace.FetchBlob('new_output')
        sign = 1 if label[0] > label[1] else -1
        if label[0] == label[1]:
            self.assertEqual(np.asscalar(output), 0)
            return

        self.assertAlmostEqual(
            np.asscalar(output),
            np.asscalar(np.log(1 + np.exp(sign * (X[1] - X[0])))),
            delta=1e-4
        )
        # check swapping row order doesn't alter overall loss
        self.assertAlmostEqual(output, new_output)
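All of the examples on this page demonstrate np.asscalar, which converts a size-1 NumPy array to the equivalent built-in Python scalar. A minimal standalone sketch of the call's effect and its modern replacement (np.asscalar was deprecated in NumPy 1.16 and removed in 1.23 in favour of ndarray.item()):

import numpy as np

x = np.array([3.5])      # size-1 array
v = x.item()             # plain Python float; modern equivalent of np.asscalar(x)
assert isinstance(v, float) and v == 3.5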
Example No. 2
    def connect_composition_III(self):
        synapse_list = []
        Mt3v_list = self.non_columnar_neurons['Mt3v']
        Mt3h_list = self.non_columnar_neurons['Mt3h']
        
        for neuron in Mt3v_list:
            neuron.assign_pos(0., 0.)
        for neuron in Mt3h_list:
            neuron.assign_pos(0., 0.)
        
        rule3synapsesv = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3v']
        rule3synapsesh = self.other_synapse_dict[self.other_synapse_dict['postname'] == 'Mt3h']
        
        dtnames = rule3synapsesv.dtype.names
        for cartridge in self.cartridges:
            synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesv[0]])))
            mtn = int(np.floor(cartridge.neurons['L2'].ypos / ((self.hexarray.Y[-1][-1]+1)/4)))
            synapse.link(cartridge.neurons['L2'], Mt3v_list[mtn])
            synapse_list.append(synapse)
            synapse = Synapse(dict(zip(dtnames, [np.asscalar(p) for p in rule3synapsesh[0]])))
            mtn = int(np.floor(cartridge.neurons['L2'].xpos / ((self.hexarray.X[-1][-1]+1)/4)))
            synapse.link(cartridge.neurons['L2'], Mt3h_list[mtn])
            synapse_list.append(synapse)

        self.composition_rules.append({'synapses': synapse_list})
Example No. 3
def build_seq(sub_num, stims, sub_A_sd, sub_B_sd):
    # shuffle stimulus list
    stims = stims.reindex(np.random.permutation(stims.index))
    
    # inter-stimulus interval is randomly selected from [1,2,3,4]
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat([1,2,3,4], len(stims.index)/4, axis=0), 0)
    np.random.shuffle(ISI)
    
    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)
    
    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]
    
    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))
    
    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
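A side note on the two draws above (illustrative, not part of the original script): np.random.normal(size=1) returns a one-element array, hence the scalar conversion, while omitting size yields a plain float directly.

import numpy as np

draw = np.random.normal(size=1, scale=0.5)
print(draw.item())                   # same value np.asscalar(draw) would give
print(np.random.normal(scale=0.5))   # size=None already returns a plain float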
Example No. 4
    def _node_to_dict(self, node):
        '''
        Helper for the master to save a MasterNode tree in JSON format.

        Parameter(s):
            node: MasterNode - Root node of the tree to convert to a dict.
        Return(s):
            result: dict - Dict representation of the tree.
        '''
        if node is None:
            return None

        result = {}

        if node.prop is None:
            result['prop'] = None
        else:
            result['prop'] = list(node.prop)

        if node.theta is None:
            result['theta'] = None
            result['tau'] = None
        else:
            result['theta'] = np.asscalar(node.theta)
            result['tau'] = np.asscalar(node.tau)

        result['left'] = self._node_to_dict(node.left)
        result['right'] = self._node_to_dict(node.right)

        return result
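A minimal sketch (not from the original project) of why the conversion to built-in types matters once the dict reaches json.dumps: NumPy scalar types such as np.int64 and np.float32 are not JSON serializable, while their .item() equivalents are.

import json
import numpy as np

theta = np.array([0.25], dtype=np.float32)
tau = np.array([3])
# json.dumps({'theta': theta[0], 'tau': tau[0]}) raises TypeError (np.float32 / np.int64)
print(json.dumps({'theta': theta[0].item(), 'tau': tau[0].item()}))   # {"theta": 0.25, "tau": 3}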
Example No. 5
    def launch_configuration(self, part):
        if self._is_direct:
            max_smem = self._max_shared_memory_needed_per_set_element
            smem_offset = max_smem * _WARPSIZE
            max_block = _device.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_X)
            if max_smem == 0:
                block_size = max_block
            else:
                threads_per_sm = _AVAILABLE_SHARED_MEMORY / max_smem
                block_size = min(max_block, (threads_per_sm / _WARPSIZE) * _WARPSIZE)
            max_grid = _device.get_attribute(driver.device_attribute.MAX_GRID_DIM_X)
            grid_size = min(max_grid, (block_size + part.size) / block_size)

            grid_size = np.asscalar(np.int64(grid_size))
            block_size = (block_size, 1, 1)
            grid_size = (grid_size, 1, 1)

            required_smem = np.asscalar(max_smem * np.prod(block_size))
            return {'op2stride': self._it_space.size,
                    'smem_offset': smem_offset,
                    'WARPSIZE': _WARPSIZE,
                    'required_smem': required_smem,
                    'block_size': block_size,
                    'grid_size': grid_size}
        else:
            return {'op2stride': self._it_space.size,
                    'WARPSIZE': 32}
Example No. 6
def run_epoch(session, m, data, is_training, verbose=True):
    if is_training not in [True,False]:
        raise ValueError("mode must be one of [True, False] but received ", is_training)

    total_cost = 0.0
    num_samples_seen= 0
    total_num_correct_predictions= 0
    list_of_training_index_list = get_random_minibatches_index(len(data[0]), BATCH_SIZE)
    total_num_batches = len(data[0]) // BATCH_SIZE
    total_num_reviews = len(data[0])
    #x      = [data[0][BATCH_SIZE * i : BATCH_SIZE * (i+1)] for i in range(total_num_batches)]
    #labels = [data[1][BATCH_SIZE * i : BATCH_SIZE * (i+1)] for i in range(total_num_batches)]
    x=[]
    labels=[]
    for l in list_of_training_index_list:
        x.append([data[0][i] for i in l])
        labels.append([data[1][i] for i in l])

    if is_training:
        if flags.first_training_epoch:
            flags.first_training_epoch= False
            print("For training, total number of reviews is: %d" % total_num_reviews)
            print("For training, total number of batches is: %d" % total_num_batches)

        for mini_batch_number, (_x, _y) in enumerate(zip(x,labels)):
            #print("mini batch: %d" %mini_batch_number)
            # x_mini and mask both have the shape of ( config.MAXLEN x BATCH_SIZE )

            x_mini, mask, labels_mini = prepare_data(_x, _y, MAXLEN_to_pad_to=config.MAXLEN)
            num_samples_seen += x_mini.shape[1]
            num_correct_predictions, _ = session.run([m.num_correct_predictions, m.train_op],
                                                     feed_dict={m._inputs: x_mini,
                                                                m._targets: labels_mini,
                                                                m._mask: mask})
            #print(m.lstm_W.eval())
            total_num_correct_predictions+= num_correct_predictions

        avg_accuracy = total_num_correct_predictions/num_samples_seen
        print("Traversed through %d samples." %num_samples_seen)
        return np.asscalar(avg_accuracy)

    else:
        if flags.first_validation_epoch or flags.testing_epoch:
            flags.first_validation_epoch= False
            flags.testing_epoch= False
            print("For validation/testing, total number of reviews is: %d" % total_num_reviews)
            print("For validation/testing, total number of batches is: %d" % total_num_batches)

        for mini_batch_number, (_x, _y) in enumerate(zip(x, labels)):
            x_mini, mask, labels_mini = prepare_data(_x, _y, MAXLEN_to_pad_to=config.MAXLEN)
            num_samples_seen += x_mini.shape[1]
            cost, num_correct_predictions = session.run([m.cost ,m.num_correct_predictions],
                                                        feed_dict={m._inputs: x_mini,
                                                                   m._targets: labels_mini,
                                                                   m._mask: mask})
            total_cost += cost
            total_num_correct_predictions += num_correct_predictions
        accuracy= total_num_correct_predictions/num_samples_seen
        print("total cost is %.4f" %total_cost)
        return np.asscalar(accuracy)
Example No. 7
def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
    # block stimulus list and shuffle within each block
    q = len(stims.index)
    stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
    stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
    shuffle(stims)
    stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
    stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])

    # inter-stimulus interval is randomly selected from [1,2,3,4]
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)

    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)

    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]

    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))

    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
Example No. 8
    def test_lambda_rank_ndcg_loss(self, n, k):
        y = np.random.rand(n).astype(np.float32)
        r = np.random.randint(k, size=n).astype(np.float32)
        dloss = np.random.random(1).astype(np.float32)

        workspace.blobs['y'] = y
        workspace.blobs['r'] = r
        workspace.blobs['dloss'] = dloss

        op = core.CreateOperator('LambdaRankNdcg', ['y', 'r'], ['loss', 'dy'])
        workspace.RunOperatorOnce(op)
        loss = workspace.blobs['loss']
        dy = workspace.blobs['dy']
        ref_loss, ref_dy = self.ref_lambda_rank_ndcg_loss(y, r)
        self.assertAlmostEqual(np.asscalar(loss), ref_loss, delta=1e-4)
        np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)

        op = core.CreateOperator(
            'LambdaRankNdcgGradient', ['y', 'dy', 'dloss'], ['dy_back']
        )
        workspace.RunOperatorOnce(op)
        dy_back = workspace.blobs['dy_back']
        np.testing.assert_allclose(
            dy_back, np.asscalar(dloss) * ref_dy, rtol=1e-5, atol=1e-6
        )
Example No. 9
def test_exclude_targets_combinations_subjectchunks():
    partitioner = ChainNode([NFoldPartitioner(attr='subjects'),
                             ExcludeTargetsCombinationsPartitioner(
                                 k=1,
                                 targets_attr='chunks',
                                 space='partitions')],
                            space='partitions')
    # targets do not need even to be defined!
    ds = Dataset(np.arange(18).reshape(9, 2),
                 sa={'chunks': np.arange(9) // 3,
                     'subjects': np.arange(9) % 3})
    dss = list(partitioner.generate(ds))
    assert_equal(len(dss), 9)

    testing_subjs, testing_chunks = [], []
    for ds_ in dss:
        testing_partition = ds_.sa.partitions == 2
        training_partition = ds_.sa.partitions == 1
        # must be scalars -- so this is an implicit test;
        # if they were not, np.asscalar would raise an error
        testing_subj = np.asscalar(np.unique(ds_.sa.subjects[testing_partition]))
        testing_subjs.append(testing_subj)
        testing_chunk = np.asscalar(np.unique(ds_.sa.chunks[testing_partition]))
        testing_chunks.append(testing_chunk)
        # and those must not appear for training
        ok_(not testing_subj in ds_.sa.subjects[training_partition])
        ok_(not testing_chunk in ds_.sa.chunks[training_partition])
    # and we should have gone through all chunks/subjs pairs
    testing_pairs = set(zip(testing_subjs, testing_chunks))
    assert_equal(len(testing_pairs), 9)
    # yoh: equivalent to set(itertools.product(range(3), range(3))))
    #      but .product is N/A for python2.5
    assert_equal(testing_pairs, set(zip(*np.where(np.ones((3,3))))))
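An illustrative standalone check of the equivalence noted in the comment above (itertools.product is fine on modern Python):

import itertools
import numpy as np

pairs_np = set(zip(*np.where(np.ones((3, 3)))))
pairs_it = set(itertools.product(range(3), range(3)))
print(pairs_np == pairs_it)   # True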
Example No. 10
    def build_data_dict(self, rdt):
        np_dict = {}
        
        time_array = rdt[rdt.temporal_parameter]
        if time_array is None:
            raise ValueError("A granule needs a time array")
        for k,v in rdt.iteritems():
            # Sparse values are different and aren't constructed using NumpyParameterData
            if isinstance(rdt.param_type(k), SparseConstantType):
                value = v[0]
                if hasattr(value, 'dtype'):
                    value = np.asscalar(value)
                time_start = np.asscalar(time_array[0])
                np_dict[k] = ConstantOverTime(k, value, time_start=time_start, time_end=None) # From now on
                continue
            elif isinstance(rdt.param_type(k), CategoryType):
                log.warning("Category types temporarily unsupported")
                continue
            elif isinstance(rdt.param_type(k), RecordType):
                value = v
            else:
                value = v

            try:
                if k == 'temp_sample':
                    print repr(value)
                np_dict[k] = NumpyParameterData(k, value, time_array)
            except:
                raise

        return np_dict
Example No. 11
    def evaluate(self, state_batch):

        # Get an action batch
        actions = self.sess.run(self.action_output, feed_dict={self.map_input: state_batch})

        # Create summaries for the actions
        actions_mean = np.mean(np.asarray(actions, dtype=float), axis=0)
        self.actions_mean_plot += actions_mean

        # Only save files every PLOT_STEP steps
        if self.train_counter % PLOT_STEP == 0:

            self.actions_mean_plot /= PLOT_STEP

            summary_action_0 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[0]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[0]))])
            summary_action_1 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[1]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[1]))])
            self.summary_writer.add_summary(summary_action_0, self.train_counter)
            self.summary_writer.add_summary(summary_action_1, self.train_counter)

            self.actions_mean_plot = [0, 0]

        return actions
Example No. 12
    def _numpy_to_values(data, default_range, append):
        '''Convert a NumPy array to values attribute'''
        def to_list_no_index(xvals, yvals):
            return [{"x": x, "y": np.asscalar(y)}
                    for x, y in zip(xvals, yvals)]

        if len(data.shape) == 1 or data.shape[1] == 1:
            xvals = default_range(data.shape[0], append)
            values = to_list_no_index(xvals, data)
        elif len(data.shape) == 2:
            if data.shape[1] == 2:
                # NumPy arrays and matrices have different iteration rules.
                if isinstance(data, np.matrix):
                    xidx = (0, 0)
                    yidx = (0, 1)
                else:
                    xidx = 0
                    yidx = 1

                xvals = [np.asscalar(row[xidx]) for row in data]
                yvals = [np.asscalar(row[yidx]) for row in data]
                values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
            else:
                raise ValueError('arrays with > 2 columns not supported')
        else:
            raise ValueError('invalid dimensions for ndarray')

        return values
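A minimal illustrative sketch of the values structure this helper produces for a two-column ndarray:

import numpy as np

data = np.array([[1.0, 2.0], [3.0, 4.0]])
values = [{"x": row[0].item(), "y": row[1].item()} for row in data]
print(values)   # [{'x': 1.0, 'y': 2.0}, {'x': 3.0, 'y': 4.0}]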
Example No. 13
def lpc_formants(signal, sr, num_formants, max_freq, time_step,
                 win_len, window_shape='gaussian'):
    output = {}
    new_sr = 2 * max_freq
    alpha = np.exp(-2 * np.pi * 50 * (1 / new_sr))
    proc = lfilter([1., -alpha], 1, signal)
    if sr > new_sr:
        proc = librosa.resample(proc, sr, new_sr)
    nperseg = int(win_len * new_sr)
    nperstep = int(time_step * new_sr)
    if window_shape == 'gaussian':
        window = gaussian(nperseg + 2, 0.45 * (nperseg - 1) / 2)[1:nperseg + 1]
    else:
        window = np.hanning(nperseg + 2)[1:nperseg + 1]
    indices = np.arange(int(nperseg / 2), proc.shape[0] - int(nperseg / 2) + 1, nperstep)
    num_frames = len(indices)
    for i in range(num_frames):
        if nperseg % 2 != 0:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2) + 1]
        else:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2)]
        frqs, bw = process_frame(X, window, num_formants, new_sr)
        formants = []
        for j, f in enumerate(frqs):
            if f < 50:
                continue
            if f > max_freq - 50:
                continue
            formants.append((np.asscalar(f), np.asscalar(bw[j])))
        missing = num_formants - len(formants)
        if missing:
            formants += [(None, None)] * missing
        output[indices[i] / new_sr] = formants
    return output
Example No. 14
    def preprocess(self):
        """

        :return:
        """
        self.N_particles = hypers['N_particles'].value

        # Set up initial state distribution
        # Initial state is centered around the steady state
        D = sz_dtype(self.population.latent_dtype)
        self.mu_initial = self.population.steady_state().reshape((D,1))

        # TODO: Implement a distribution over the initial variances
        sig_initial = np.ones(1, dtype=self.population.latent_dtype)
        sig_initial.fill(np.asscalar(hypers['sig_ch_init'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_initial[neuron.name][compartment.name]['V'] = hypers['sig_V_init'].value
        self.sig_initial = as_matrix(sig_initial)

        # TODO: Implement a distribution over the  transition noise
        sig_trans = np.ones(1, dtype=self.population.latent_dtype)
        sig_trans.fill(np.asscalar(hypers['sig_ch'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_trans[neuron.name][compartment.name]['V'] = hypers['sig_V'].value
        self.sig_trans = as_matrix(sig_trans)
Example No. 15
def find_tip_coordination(a, bondlength=2.6, bulk_nn=4):
    """
    Find position of tip in crack cluster from coordination
    """
    i, j = neighbour_list("ij", a, bondlength)
    nn = np.bincount(i, minlength=len(a))

    a.set_array('n_neighb', nn)
    g = a.get_array('groups')

    y = a.positions[:, 1]
    above = (nn < bulk_nn) & (g != 0) & (y > a.cell[1,1]/2.0)
    below = (nn < bulk_nn) & (g != 0) & (y < a.cell[1,1]/2.0)

    a.set_array('above', above)
    a.set_array('below', below)

    bond1 = np.asscalar(above.nonzero()[0][a.positions[above, 0].argmax()])
    bond2 = np.asscalar(below.nonzero()[0][a.positions[below, 0].argmax()])

    # These need to be ints, otherwise they are not JSON serializable.
    a.info['bond1'] = bond1
    a.info['bond2'] = bond2

    return bond1, bond2
Example No. 16
    def __init__(self, x):
        """Compute Givens rotation for provided vector x.

        Computes Givens rotation
        :math:`G=\\begin{bmatrix}c&s\\\\-\\overline{s}&c\\end{bmatrix}`
        such that
        :math:`Gx=\\begin{bmatrix}r\\\\0\\end{bmatrix}`.
        """
        # make sure that x is a vector ;)
        if x.shape!=(2,1):
            raise ValueError('x is not a vector of shape (2,1)')

        a = numpy.asscalar(x[0])
        b = numpy.asscalar(x[1])
        # real vector
        if numpy.isrealobj(x):
            c, s = blas.drotg(a,b)
        # complex vector
        else:
            c, s = blas.zrotg(a,b)

        self.c = c
        self.s = s
        self.r = c*a + s*b
        self.G = numpy.array([[c, s], [-numpy.conj(s), c]])
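A quick numerical check (illustrative; it assumes the method above belongs to a class named Givens): applying G should zero the second component of x and leave r in the first.

import numpy

x = numpy.array([[3.0], [4.0]])
giv = Givens(x)          # hypothetical name for the class defined above
print(giv.G.dot(x))      # approximately [[5.], [0.]]
print(giv.r)             # approximately 5.0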
Example No. 17
    def __init__(self, obj):
        self.obj = obj
        parameters = []
        names = []
        ties = {}

        def add_par(p, name):
            if not isinstance(p, Parameter):
                p = Parameter(p,p)
            for par_check in parameters + [None]:
                if p is par_check:
                    break
            if par_check is not None:
                # if the above loop encountered a break, it
                # means the parameter is tied

                # we will rename the parameter so that when it is printed it
                # better reflects how it is used
                new_name = tied_name(names[parameters.index(p)], name)
                names[parameters.index(p)] = new_name

                if new_name in ties:
                    # if there is already an existing tie group we need to
                    # do a few things to get the name right
                    group = ties[new_name]

                else:
                    group = [name]

                group.append(name)
                ties[new_name] = group

            else:
                if not p.fixed:
                    parameters.append(p)
                    names.append(name)

        # find all the Parameter's in the obj
        for name, par in sorted(iter(obj.parameters.items()), key=lambda x: x[0]):
            if isinstance(par, ComplexParameter):
                add_par(par.real, name+'.real')
                add_par(par.imag, name+'.imag')
            elif isinstance(par, dict):
                for key, val in par.items():
                    add_par(val, name + '_' + key)
            elif isinstance(par, xr.DataArray):
                if len(par.dims)==1:
                    dimname = par.dims[0]
                else:
                    raise ParameterSpecificationError('Multi-dimensional parameters are not supported')
                for key in par[dimname]:
                    add_par(np.asscalar(par.sel(**{dimname:key})),name+'_'+np.asscalar(key))
            elif isinstance(par, Parameter):
                add_par(par, name)

        parameters = deepcopy(parameters)
        for i, name in enumerate(names):
            parameters[i].name = name
        self.parameters = parameters
        self.ties = ties
Example No. 18
def loadTable(datapath, datatable, delimiter, dtype, engine, indexCols=[], skipLines=1, chunkSize=100000, **kwargs):
    cnt = 0
    with open(datapath) as fh:
        while cnt < skipLines:
            fh.readline()
            cnt += 1
        cnt = 0
        tmpstr = ''
        for l in fh:
            tmpstr += l
            cnt += 1
            if cnt%chunkSize == 0:
                print "Loading chunk #%i"%(int(cnt/chunkSize))
                dataArr = numpy.genfromtxt(StringIO(tmpstr), dtype=dtype, delimiter=delimiter, **kwargs)
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(l[name])) for name in l.dtype.names) for l in dataArr])
                tmpstr = ''
        #Clean up the last chunk
        if len(tmpstr) > 0:
            dataArr = numpy.genfromtxt(StringIO(tmpstr), dtype=dtype, delimiter=delimiter, **kwargs)
            try:
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(l[name])) for name in l.dtype.names) for l in dataArr])
            # If the file only has one line, the result of genfromtxt is a 0-d array, so cannot be iterated
            except TypeError:
                engine.execute(datatable.insert(), [dict((name, numpy.asscalar(dataArr[name])) for name in dataArr.dtype.names),])

    for col in indexCols:
        if hasattr(col, "__iter__"):
            print "Creating index on %s"%(",".join(col))
            colArr = (datatable.c[c] for c in col)
            i = Index('%sidx'%''.join(col), *colArr)
        else:
            print "Creating index on %s"%(col)
            i = Index('%sidx'%col, datatable.c[col])

        i.create(engine)
Example No. 19
def read_hst_filter(fname):
    """
    Reading the gemini filter file into a dataframe

    Parameters
    ----------

    fname: ~str
        path to file to be read

    """

    for i, line in enumerate(open(fname)):
        if line.strip().startswith('1'):
            skiprows = i - 1
            break
    else:
        raise ValueError('File {0} not formatted in Gemini style'.format(fname))

    data = pd.DataFrame(genfromtxt(fname, skip_header=skiprows, usecols=(1, 2)),
                        columns=['wavelength', 'transmission_lambda'])

    start_filter_idx = asscalar(
        (data.transmission_lambda > 0).searchsorted(1) - 1)
    end_filter_idx = (data.transmission_lambda > 0)[::-1].searchsorted(1)
    end_filter_idx = asscalar((len(data) - end_filter_idx) + 1)

    return data.iloc[start_filter_idx:end_filter_idx]
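An illustrative toy version of the searchsorted trick used above to locate the edges of the non-zero transmission band (the mask is cast to int here just to keep the sketch self-contained):

import pandas as pd

transmission = pd.Series([0.0, 0.0, 0.2, 0.8, 0.3, 0.0, 0.0])
mask = (transmission > 0).astype(int)
start = mask.searchsorted(1) - 1
end = len(transmission) - mask[::-1].searchsorted(1) + 1
print(start, end)   # 1 6 -> keeps rows 1..5, one zero sample either side of the band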
Example No. 20
    def __fitmodel1d(self, Y):
        """Helper for apply_along_axis()"""
        res = self._res
        results = self._model_gen(Y, self._exog).fit()
        t_to_z = lambda t, df: stats.norm.ppf(stats.t.cdf(t, df))
        if isinstance(res, np.ndarray):
            if len(res.shape) == 1:
                tstats = results.t_test(self._res)
                return [np.asscalar(i) for i in [tstats.tvalue,
                                                 tstats.pvalue,
                                                 tstats.effect,
                                                 tstats.sd,
                                                 np.array(tstats.df_denom),
                                                 t_to_z(tstats.tvalue, tstats.df_denom)]]

            elif len(res.shape) == 2:
                fstats = results.f_test(self._res)
                return [np.asscalar(i) for i in
                            [fstats.fvalue,
                             fstats.pvalue]] + [fstats.df_num,
                                                fstats.df_denom]
            else:
                raise ValueError("Test specification (via `res`) has to be 1d or 2d array")
        elif isinstance(res, str):
            return results.__getattribute__(res)
        else:
            return res(results)
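An illustrative standalone check of the t-to-z mapping defined above (same cumulative probability under the normal distribution as under the t distribution):

from scipy import stats

t, df = 2.0, 10
z = stats.norm.ppf(stats.t.cdf(t, df))
print(z)   # a bit below 2.0: the heavier t tails map to a smaller normal quantile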
Example No. 21
def getSrcCellsValueRange( mainSheet, totalFields, valsCol, i, j=0, k=0, \
                           numProducts=0, specFieldsInd=[] ):
    ''' Returns lists of Values and Names of the Source cells
        for the given i and j indices '''
    # two +1 offsets because the field indices start from 0 while Excel rows start from 1
    src_RowIndex = ((k-1)*numProducts*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + ((i-1)*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + np.array(specFieldsInd) + np.array(len(specFieldsInd)*[1])\
                   + np.array(len(specFieldsInd)*[1])

    temp = Cell( mainSheet, np.asscalar( src_RowIndex[0] ), valsCol ).horizontal
    #temp = np.array([ i for i in temp if i!=None])

    srcCells_Val = np.zeros( (len(specFieldsInd), len(temp) ) )
    srcCells_Name = np.empty( srcCells_Val.shape, dtype = 'S10' )

    # CellRange only required here because one of the spread fields (discretion) is missing in later months
    for row in range(0, len(specFieldsInd)):
        temp_Val = CellRange( mainSheet, ( np.asscalar( src_RowIndex[row] ), valsCol), \
                         ( np.asscalar( src_RowIndex[row] ), valsCol+srcCells_Val.shape[1]-1 ) ).value 
        srcCells = CellRange( mainSheet, ( np.asscalar( src_RowIndex[row] ), valsCol), \
                         ( np.asscalar( src_RowIndex[row] ), valsCol+srcCells_Val.shape[1]-1 ) )
        temp_Name = [ cell.name for cell in srcCells ]

        # Zero all NoneType cells
        srcCells_Name[row] = temp_Name #np.array([ temp_Name[i] for i,item in
                                       #enumerate(temp_Val) if item!=None])
        srcCells_Val[row] = np.array([ float( re.sub('[^\d\.\-]','', str(i)) ) \
                                       if i!=None else 0 for i in temp_Val])
        
    return srcCells_Val, srcCells_Name
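An illustrative sketch (toy input, not from the workbook) of the regex clean-up applied to each cell value above:

import re

raw = "$1,234.50"
print(float(re.sub(r'[^\d\.\-]', '', raw)))   # 1234.5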
Example No. 22
 def _findGaps(self, workspace_name, min_i, max_i):
     """
     Find workspace indexes with a low overall intensity
     A histogram with low intensity contains zero-intensity values for many
     of the energy values (Energy is the X-axis)
     :param workspace_name:
     :param min_i: minimum workspace index to look for
     :param max_i: 1+maximum workspace index to look for
     :return: chunks of consecutive workspace indexes with low overall intensity
     """
     zero_fraction = list()  # for each histogram, count the number of zeros
     workspace = sapi.mtd[workspace_name]
     for index in range(min_i, max_i):
         y = workspace.dataY(index)
         zero_fraction.append(1.0 - (1. * numpy.count_nonzero(y)) / len(y))
     # Find workspace indexes zero fraction above a reasonable threshold
     threshold = numpy.mean(zero_fraction) + 2 * numpy.std(zero_fraction)  # above twice the standard deviation
     high_zero_fraction = min_i + (numpy.where(zero_fraction > threshold))[0]
     # split the high_zero_fraction indexes into chunks of consecutive indexes
     #  Example: if high_zero_fraction=[3,7,8,9,11,15,16], then we split into [3],[7,8,9], [11], [15,16]
     gaps = list()  # intensity gaps, because high zero fraction means low overall intensity
     gap = [numpy.asscalar(high_zero_fraction[0]), ]
     for index in range(1, len(high_zero_fraction)):
         if high_zero_fraction[index] - high_zero_fraction[index - 1] == 1:
             gap.append(numpy.asscalar(high_zero_fraction[index]))  # two consecutive indexes
         else:
             gaps.append(gap)
             gap = [numpy.asscalar(high_zero_fraction[index]), ]
     gaps.append(gap)  # final dangling gap has to be appended
     return gaps  # a list of lists
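A self-contained sketch of the consecutive-index splitting described in the docstring, using its worked example [3, 7, 8, 9, 11, 15, 16] (ndarray.item() stands in for the deprecated numpy.asscalar):

import numpy

high_zero_fraction = numpy.array([3, 7, 8, 9, 11, 15, 16])
gaps, gap = [], [high_zero_fraction[0].item()]
for prev, cur in zip(high_zero_fraction[:-1], high_zero_fraction[1:]):
    if cur - prev == 1:
        gap.append(cur.item())
    else:
        gaps.append(gap)
        gap = [cur.item()]
gaps.append(gap)
print(gaps)   # [[3], [7, 8, 9], [11], [15, 16]]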
Example No. 23
def rformat(item, precision=2, pretty=True):
    #NOTE: LOOK AT pprint
    '''
    Apply numerical formatting recursively for arbitrarily nested iterators, 
    optionally applying a conversion function on each item.
    '''
    if isinstance(item, str):
        return item
    
    if isinstance(item, (int, float)):
        return minfloatformat(item, precision)
        
    try:                #array-like items with len(item) in [0,1]
        #NOTE: This will suppress the type representation of the object str
        if isinstance(np.asscalar(item), str):
            #np.asscalar converts np types to python builtin types (Phew!!)
            return str(item)
            
        if isinstance(np.asscalar(item), (int, float)):
            return minfloatformat(item, precision)
    except:
        #Item is not str, int, float, or convertible to such...
        pass
    
    if isinstance(item, np.ndarray):
        return np.array2string(item, precision=precision)
        #NOTE:  lots more functionality here
        
    return pformat(item)
Example No. 24
 def getLowerLimbAngles(self, tf, side):
     """
     Defines the joint angles of the human legs, starting from the position
     of the tf generated according to the data coming from the Kinect
     
     @param tf tf
     @param 'L' for left lower limb, 'R' for right lower limb
     """
     if side == 'L':        
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'left_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'left_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'left_foot', self.kin_frame, self.last_updated)
     else:            
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'right_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'right_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'right_foot', self.kin_frame, self.last_updated)
         
     if sys_hip is None or sys_knee is None or sys_foot is None:
         return None
     
     vect_kh = (sys_hip[0:3,3] - sys_knee[0:3,3])/  \
               numpy.linalg.norm([sys_hip[0:3,3] - sys_knee[0:3,3]])
     vect_fk = (sys_knee[0:3,3] - sys_foot[0:3,3])/ \
               numpy.linalg.norm([sys_knee[0:3,3] - sys_foot[0:3,3]])
     q2 = - numpy.arccos(utils.checkArg(numpy.asscalar(numpy.dot(vect_kh.T,vect_fk))))
     
     q1 = numpy.asscalar(numpy.arccos(vect_kh[1]))                                       #[0,pi]
     if numpy.asscalar(numpy.arcsin(vect_kh[2])) < 0:                                    #[-pi,pi]
         q1 = -q1 
         
     return [q1, q2]
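An illustrative reminder (toy vectors) of the arccos-of-dot-product identity used for q1 and q2 above:

import numpy

v1 = numpy.array([0.0, 1.0, 0.0])         # unit vector along y
v2 = numpy.array([0.0, 0.0, 1.0])         # unit vector along z
print(numpy.arccos(numpy.dot(v1, v2)))    # pi/2: the angle between perpendicular unit vectors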
Example No. 25
def stations_json():

    stations = np.recfromcsv('chi-stations.csv', delimiter=',')

    output = {'type': "FeatureCollection", 'features':[]}

    for s in stations:

        output['features'].append({
            'type': "Feature",
            "id": np.asscalar(s[0]),
            "geometry": {
                "type":"Point",
                "coordinates":[np.asscalar(s[2]),np.asscalar(s[1])] #long, lat
            },
            "geometry_name": "origin_geom",
            "properties": {
                'name': s[3]
            }})

    f = io.open('chi-stations.json', 'w', encoding='utf-8') 
    f.write(unicode(json.dumps(output, ensure_ascii=False)))
    f.close()

    json_output=open('chi-stations.json')
    output_data = json.load(json_output)
    pprint(output_data)
    json_output.close()
Example No. 26
def run_epoch(session, m, mode):

    total_cost = 0.0
    num_samples_seen= 0
    total_num_correct_predictions= 0

    if mode == 'training':
        if flags.first_training_epoch:
            flags.first_training_epoch= False

        num_correct_predictions,num_samples, _ = session.run([m.num_correct_predictions,m.num_samples, m.train_op])

        avg_accuracy = num_correct_predictions/num_samples
        print("Traversed through %d samples." %num_samples_seen)
        return np.asscalar(avg_accuracy)

    else:
        if flags.first_validation_epoch or flags.testing_epoch:
            flags.first_validation_epoch= False
            flags.testing_epoch= False

        cost, num_correct_predictions,num_samples = session.run([m.cost ,m.num_correct_predictions,m.num_samples])

        accuracy= num_correct_predictions/num_samples
        print("total cost is %.4f" %total_cost)
        return np.asscalar(accuracy)
Example No. 27
def noisy_alignment_similarity_transform(source, target, noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the source
    and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the same
        amount of noise is applied to the scale, rotation and translation
        parameters of the optimal similarity transform. If `list` of
        `float` it must have length 3, where the first, second and third elements
        denote the amount of noise to be applied to the scale, rotation and
        translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source, target,
                                     rotation=allow_alignment_rotation)

    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.asscalar(np.random.randn(1))
        r = noise_percentage[1] * (180 / 3) * np.asscalar(np.random.randn(1))
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        s = noise_percentage[0] * 0.5 * (2 * np.asscalar(np.random.randn(1)) - 1)
        r = noise_percentage[1] * 180 * (2 * np.asscalar(np.random.rand(1)) - 1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))
Example No. 28
    def __init__(self, train_plans, purchased_plan):
        classes, indices, y = np.unique(purchased_plan.values, return_index=True, return_inverse=True)
        lov_classes, lov_indices, y_lov = np.unique(train_plans.values, return_index=True, return_inverse=True)
        old_to_new_purchased = dict()
        old_to_new_lov = dict()
        for k in range(len(classes)):
            # create inverse mapping that returns new class label given the old class label
            old_to_new_purchased[str(np.asscalar(purchased_plan.values[indices[k]]))] = k
        for k in range(len(lov_classes)):
            old_to_new_lov[str(np.asscalar(train_plans.values[lov_indices[k]]))] = k
        self.old_to_new = old_to_new_purchased
        self.old_to_new_lov = old_to_new_lov
        self.nclasses_purchased = len(classes)
        self.nclasses_lov = len(np.unique(train_plans.values))
        self.classes = classes
        self.classes_lov = lov_classes
        self.priors = np.zeros((self.nclasses_purchased, self.nclasses_lov))
        new_id = pd.Series(data=y, index=purchased_plan.index)
        for j in xrange(self.nclasses_lov):
            class_counts = np.bincount(new_id.ix[train_plans[train_plans == lov_classes[j]].index],
                                       minlength=len(classes))
            # priors[k, j] is fraction in class k (new label) with last observed value as class j (new label)
            if np.sum(class_counts) > 0:
                self.priors[:, j] = class_counts / float(np.sum(class_counts))

        prior_norm = self.priors.sum(axis=0)
        prior_norm[prior_norm == 0] = 1.0  # don't divide by zero
        self.priors /= prior_norm  # normalize so probabilities sum to one
Example No. 29
  def get_statistics(self, attribute=0):
    attribute = self._storage["attribute/%s" % attribute]

    if "min" not in attribute.attrs or "max" not in attribute.attrs:
      attribute_min = None
      attribute_max = None

      chunk_size = 1000
      for begin in numpy.arange(0, len(attribute), chunk_size):
        slice = attribute[begin : begin + chunk_size]
        if attribute.dtype.char in ["O", "S", "U"]:
          data_min = min(slice)
          data_max = max(slice)
          attribute_min = str(data_min) if attribute_min is None else str(min(data_min, attribute_min))
          attribute_max = str(data_max) if attribute_max is None else str(max(data_max, attribute_max))
        else:
          slice = slice[numpy.invert(numpy.isnan(slice))]
          if len(slice):
            data_min = numpy.asscalar(slice.min())
            data_max = numpy.asscalar(slice.max())
            attribute_min = data_min if attribute_min is None else min(data_min, attribute_min)
            attribute_max = data_max if attribute_max is None else max(data_max, attribute_max)

      if attribute_min is not None:
        attribute.attrs["min"] = attribute_min
      if attribute_max is not None:
        attribute.attrs["max"] = attribute_max

    return dict(min=attribute.attrs.get("min", None), max=attribute.attrs.get("max", None))
Example No. 30
def getSrcCellsValueRange( mainSheet, totalFields, valsCol, i, j=0, k=0, \
                           numProducts=0, numRates=0, specFieldsInd=[] ):
    ''' Returns lists of Values and Names of the Source cells
        for the given i and j indices '''
    # two +1 offsets because the field indices start from 0 while Excel rows start from 1
    src_RowIndex = ((k-1)*numProducts*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + ((i-1)*totalFields)* np.array(len(specFieldsInd)*[1])\
                   + np.array(specFieldsInd)+ np.array(len(specFieldsInd)*[1])\
                   + np.array(len(specFieldsInd)*[1])
    temp = Cell( mainSheet, np.asscalar( src_RowIndex[0] ), valsCol ).horizontal
    temp = np.array([ i for i in temp if i!=None])
    
    srcCells_Val = np.zeros( (len(specFieldsInd), len( temp )) )
    srcCells_Name = np.empty( srcCells_Val.shape, dtype = 'S10' )

    for row in range(0, len(specFieldsInd)):
        temp_Val = Cell( mainSheet, np.asscalar( src_RowIndex[row] ), \
                                valsCol ).horizontal
        srcCells = Cell( mainSheet, np.asscalar( src_RowIndex[row] ), \
                         valsCol ).horizontal_range 
        temp_Name = [ cell.name for cell in srcCells ]

        # Ignore all NoneType cells
        srcCells_Name[row] = np.array([ temp_Name[i] for i,item in \
                                        enumerate(temp_Val) if item!=None])
        srcCells_Val[row] = np.array([ float( re.sub('[^\d\.\-]','', str(i)) ) \
                                       for i in temp_Val if i!=None ])
        
    return srcCells_Val, srcCells_Name
Example No. 31
 def estimation_errors_area(self) -> float:
     return np.asscalar(np.sum(self.estimation_errors()))
Example No. 32
def build_Schw_dict(*args, **kwargs):
    """ Function to build a dict of Schwarzschild QNMs.

    Loops over values of (s,l), using SchwOvertoneSeq to find
    sequences in n.

    TODO Documentation

    Parameters
    ----------
    s_arr: [int] [default: [-2, -1, 0]]
      Array of s values to run over.

    n_max: int [default: 20]
      Maximum overtone number to run over (inclusive).

    l_max: int [default: 20]
      Maximum angular harmonic number to run over (inclusive).

    tol: float [default: 1e-10]
      Tolerance to pass to SchwOvertoneSeq.

    Returns
    -------
    dict
      A dict with tuple keys (s,l,n).
      The value at d[s,l,n] is a tuple (omega, cf_err, n_frac)
      where omega is the frequency omega_{s,l,n}, cf_err is the
      estimated truncation error for the continued fraction, and
      n_frac is the depth of the continued fraction evaluation.

    """

    # TODO: Should we allow any other params to be customizable?
    s_arr   = kwargs.get('s_arr',   [-2, -1, 0])
    n_max   = kwargs.get('n_max',   20)
    l_max   = kwargs.get('l_max',   20)

    tol     = kwargs.get('tol',     1e-10)

    Schw_dict = {}
    Schw_err_dict = {}

    for s in s_arr:
        ls = np.arange(l_min(s,0),l_max+1)
        for l in ls:
            Schw_seq = SchwOvertoneSeq(s=s, l=l,
                                       n_max=n_max, tol=tol)
            try:
                Schw_seq.find_sequence()
            except:
                logging.warn("Failed at s={}, l={}".format(s, l))
            for n, (omega, cf_err, n_frac) in enumerate(zip(Schw_seq.omega,
                                                            Schw_seq.cf_err,
                                                            Schw_seq.n_frac)):
                Schw_dict[(s,l,n)] = (np.asscalar(omega),
                                      np.asscalar(cf_err),
                                      int(n_frac))
                Schw_dict[(-s,l,n)] = Schw_dict[(s,l,n)]

    return Schw_dict
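A sketch of how the returned dict is laid out, as described in the docstring (the numbers below are placeholders, not computed values):

Schw_dict_example = {(-2, 2, 0): (0.5 - 0.1j, 1e-10, 100)}   # placeholder entry
omega, cf_err, n_frac = Schw_dict_example[(-2, 2, 0)]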
Example No. 33
def generateVal(data, r, c, meanVector, row):
    Z1 = getZ(data[:, r], meanVector[0][r], row)
    Z2 = getZ(data[:, c], meanVector[0][c], row)
    return np.asscalar(getCorr(Z1, Z2))
Example No. 34
        X0 = normalXi
    return X0


#ϵ = 0.001
eigVector = findEig(X0, covMatrix, epsilon)
print(eigVector)

u1 = eigVector[:, 0]
print(u1)

u2 = eigVector[:, 1]
print(u2)

# Calculate eigenvalues
lambda1 = np.asscalar(np.dot(np.dot(u1.transpose(), covMatrix), u1))
print(lambda1)

lambda2 = np.asscalar(np.dot(np.dot(u2.transpose(), covMatrix), u2))
print(lambda2)
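# Note (added for clarity): for a unit eigenvector u of covMatrix, the quadratic form
# u^T covMatrix u reduces to the eigenvalue itself, since covMatrix u = lambda * u and
# u^T u = 1 -- which is why lambda1 and lambda2 are computed this way.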

# Project original points (Z) onto the 2 eigenvectors
eigTrans = eigVector.transpose()
P = np.dot(eigVector, eigTrans)
projZ = (np.dot(P, Zt)).transpose()
res = (np.dot(eigTrans, Zt)).transpose()
print(res)

x = np.array(res[:, 0])
y = np.array(res[:, 1])
plt.scatter(x, y)
Example No. 35
 def wrapper(value, *args, **kwargs):
     result = func(numpy.array(value), *args, **kwargs)
     if numpy.isscalar(value):
         result = numpy.asscalar(result)
     return result
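A hypothetical use of the wrapper pattern above (the names here are invented for illustration): keep vectorised behaviour for array input while returning a plain scalar for scalar input.

import numpy

def clip01(value):
    result = numpy.clip(numpy.array(value), 0.0, 1.0)
    return result.item() if numpy.isscalar(value) else result

print(clip01(1.7))           # 1.0, a plain Python float
print(clip01([0.5, 1.7]))    # [0.5 1. ]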
Example No. 36
#################################
## initialize parameter matrix ##
#################################
k = X.shape[1]
np.random.seed(10815657)
nudge=0.01
Beta = np.random.uniform(low=-1*nudge, high=1*nudge, size=k).reshape(k, 1)

####################
## Newton-Raphson ##
####################
m = 5
J = pd.DataFrame()
J['iterative_step'] = range(0,m+1)
J['cost'] = np.full(m+1, None)
J.loc[0, 'cost'] = np.asscalar(np.dot((Y - np.dot(X, Beta)).T, (Y - np.dot(X, Beta))))

inv_J2_partial_Beta2 = inv(2*np.dot(X.T, X))
for i in range(1, m+1):    
    J_partial_Beta = (-2*np.dot(X.T, Y)) + (2*np.dot(np.dot(X.T, X), Beta))
    Beta = Beta - np.dot(inv_J2_partial_Beta2, J_partial_Beta)
    J.loc[i, 'cost'] = np.asscalar(np.dot((Y - np.dot(X, Beta)).T, (Y - np.dot(X, Beta))))
    del J_partial_Beta    

plt.plot(J['iterative_step'], J['cost'])
plt.title('Newton-Raphson') 
plt.xlabel('Iterative Step') 
plt.ylabel('Cost') 
print(Beta)
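# Sanity check (added for illustration): the cost above is quadratic in Beta, so a single
# Newton-Raphson step already lands on the closed-form OLS solution.
Beta_ols = np.dot(inv(np.dot(X.T, X)), np.dot(X.T, Y))
print(np.allclose(Beta, Beta_ols))   # True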

## built in package
Example No. 37
    mat = obj_object.active_material

    drawAmo = list(range(5, 15))
    freqAmo = np.bincount(drawAmo)
    AmoDraw = np.random.choice(np.arange(len(freqAmo)),
                               1,
                               p=freqAmo / len(drawAmo),
                               replace=False)
    drawObj = list(range(1, len(model_file)))
    freqObj = np.bincount(drawObj)
    ObjDraw = np.random.choice(np.arange(len(freqObj)),
                               AmoDraw,
                               p=freqObj / len(drawObj),
                               replace=True)

    num_object = np.asscalar(AmoDraw)
    # num_object = 1
    print(num_object)
    object_label = []
    anchor_pose = np.zeros((num_object, 6))  #location x,y,z, euler x,y,z

    for i in np.arange(num_object):
        file_idx = randint(0, len(model_file) - 1)
        file_model = model_file[file_idx]
        solo_model = model_solo[file_idx]
        imported_object = bpy.ops.import_mesh.stl(filepath=file_model,
                                                  filter_glob="*.stl",
                                                  files=[{
                                                      "name": solo_model
                                                  }],
Example No. 38
def main():
    listener()
    # env = gym.make('CartPole-v0')
    # env.seed(0)
    ob_space = 4
    Policy = Policy_net('policy')
    Old_Policy = Policy_net('old_policy')
    PPO = PPOTrain(Policy, Old_Policy, gamma=GAMMA)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        writer = tf.summary.FileWriter('./log/train', sess.graph)
        sess.run(tf.global_variables_initializer())
        reset()
        obs = robot_state.robot_state
        reward = 0
        success_num = 0

        for iteration in range(ITERATION):  # episode
            observations = []
            actions = []
            v_preds = []
            rewards = []
            run_policy_steps = 0
            while True:  # run policy RUN_POLICY_STEPS which is much less than episode length
                run_policy_steps += 1
                obs = np.stack([obs]).astype(
                    dtype=np.float32)  # prepare to feed placeholder Policy.obs
                act, v_pred = Policy.act(obs=obs, stochastic=True)
                print('act: ', act, 'v_pred: ', v_pred)
                act = np.asscalar(act)
                v_pred = np.asscalar(v_pred)

                observations.append(obs)
                actions.append(act)
                v_preds.append(v_pred)
                rewards.append(reward)

                reward, done = take_action(act)
                time.sleep(0.25)
                next_obs = robot_state.robot_state

                if done:
                    v_preds_next = v_preds[1:] + [
                        0
                    ]  # next state of terminate state has 0 state value
                    reset()
                    obs = robot_state.robot_state
                    reward = -1
                    break
                else:
                    obs = next_obs

            writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag='episode_length',
                                     simple_value=run_policy_steps)
                ]), iteration)
            writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag='episode_reward',
                                     simple_value=sum(rewards))
                ]), iteration)

            if sum(rewards) >= 195:
                success_num += 1
                render = True
                if success_num >= 100:
                    saver.save(sess, './model/model.ckpt')
                    print('Clear!! Model saved.')
                    break
            else:
                success_num = 0

            gaes = PPO.get_gaes(rewards=rewards,
                                v_preds=v_preds,
                                v_preds_next=v_preds_next)

            # convert list to numpy array for feeding tf.placeholder
            observations = np.reshape(observations, [len(observations), 4])
            actions = np.array(actions).astype(dtype=np.int32)
            rewards = np.array(rewards).astype(dtype=np.float32)
            v_preds_next = np.array(v_preds_next).astype(dtype=np.float32)
            gaes = np.array(gaes).astype(dtype=np.float32)
            gaes = (gaes - gaes.mean())
            print('gaes', gaes)

            PPO.assign_policy_parameters()

            inp = [observations, actions, rewards, v_preds_next, gaes]

            # train
            for epoch in range(4):
                sample_indices = np.random.randint(
                    low=0, high=observations.shape[0],
                    size=64)  # indices are in [low, high)
                sampled_inp = [
                    np.take(a=a, indices=sample_indices, axis=0) for a in inp
                ]  # sample training data
                PPO.train(obs=sampled_inp[0],
                          actions=sampled_inp[1],
                          rewards=sampled_inp[2],
                          v_preds_next=sampled_inp[3],
                          gaes=sampled_inp[4])

            summary = PPO.get_summary(obs=inp[0],
                                      actions=inp[1],
                                      rewards=inp[2],
                                      v_preds_next=inp[3],
                                      gaes=inp[4])[0]

            writer.add_summary(summary, iteration)
        writer.close()
Example No. 39
    def detectNeedle(self, magnitudevolume, phasevolume, maskThreshold,
                     ridgeOperator, slice_index):

        #magnitude volume
        magn_imageData = magnitudevolume.GetImageData()
        magn_rows, magn_cols, magn_zed = magn_imageData.GetDimensions()
        magn_scalars = magn_imageData.GetPointData().GetScalars()
        magn_imageOrigin = magnitudevolume.GetOrigin()
        magn_imageSpacing = magnitudevolume.GetSpacing()
        magn_matrix = vtk.vtkMatrix4x4()
        magnitudevolume.GetIJKToRASMatrix(magn_matrix)
        # magnitudevolume.CreateDefaultDisplayNodes()

        # phase volume
        phase_imageData = phasevolume.GetImageData()
        phase_rows, phase_cols, phase_zed = phase_imageData.GetDimensions()
        phase_scalars = phase_imageData.GetPointData().GetScalars()

        #Convert vtk to numpy
        magn_array = numpy_support.vtk_to_numpy(magn_scalars)
        numpy_magn = magn_array.reshape(magn_zed, magn_rows, magn_cols)
        phase_array = numpy_support.vtk_to_numpy(phase_scalars)
        numpy_phase = phase_array.reshape(phase_zed, phase_rows, phase_cols)

        # slice = int(slice_number)
        # slice = (slice_index)
        # maskThreshold = int(maskThreshold)

        #2D Slice Selector
        ### 3 3D values are : numpy_magn , numpy_phase, mask
        numpy_magn = numpy_magn[slice_index, :, :]
        numpy_phase = numpy_phase[slice_index, :, :]
        #mask = mask[slice,:,:]
        numpy_magn_sliced = numpy_magn.astype(np.uint8)

        #mask thresholding
        img = cv2.pyrDown(numpy_magn_sliced)
        _, threshed = cv2.threshold(numpy_magn_sliced, maskThreshold, 255,
                                    cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(threshed, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

        #find maximum contour and draw
        cmax = max(contours, key=cv2.contourArea)
        epsilon = 0.002 * cv2.arcLength(cmax, True)
        approx = cv2.approxPolyDP(cmax, epsilon, True)
        cv2.drawContours(numpy_magn_sliced, [approx], -1, (0, 255, 0), 3)

        width, height = numpy_magn_sliced.shape

        #fill maximum contour and draw
        mask = np.zeros([width, height, 3], dtype=np.uint8)
        cv2.fillPoly(mask, pts=[cmax], color=(255, 255, 255))
        mask = mask[:, :, 0]

        #phase_cropped
        phase_cropped = cv2.bitwise_and(numpy_phase, numpy_phase, mask=mask)
        phase_cropped = np.expand_dims(phase_cropped, axis=0)

        node = slicer.vtkMRMLScalarVolumeNode()
        node.SetName('phase_cropped')
        slicer.mrmlScene.AddNode(node)

        slicer.util.updateVolumeFromArray(node, phase_cropped)
        node.SetOrigin(magn_imageOrigin)
        node.SetSpacing(magn_imageSpacing)
        node.SetIJKToRASDirectionMatrix(magn_matrix)

        unwrapped_phase = slicer.vtkMRMLScalarVolumeNode()
        unwrapped_phase.SetName('unwrapped_phase')
        slicer.mrmlScene.AddNode(unwrapped_phase)

        #
        # Run phase unwrapping module
        #
        parameter_name = slicer.mrmlScene.GetNodeByID(
            'vtkMRMLCommandLineModuleNode1')

        if parameter_name is None:
            slicer.cli.createNode(slicer.modules.phaseunwrapping)
        else:
            pass
        cli_input = slicer.util.getFirstNodeByName('phase_cropped')
        cli_output = slicer.util.getNode('unwrapped_phase')

        cli_params = {'inputVolume': cli_input, 'outputVolume': cli_output}
        self.cliParamNode = slicer.cli.runSync(slicer.modules.phaseunwrapping,
                                               node=self.cliParamNode,
                                               parameters=cli_params)

        pu_imageData = unwrapped_phase.GetImageData()
        pu_rows, pu_cols, pu_zed = pu_imageData.GetDimensions()
        pu_scalars = pu_imageData.GetPointData().GetScalars()
        pu_NumpyArray = numpy_support.vtk_to_numpy(pu_scalars)
        phaseunwrapped = pu_NumpyArray.reshape(pu_zed, pu_rows, pu_cols)

        # for debug
        self.phaseunwrapped_numpy = pu_NumpyArray.reshape(pu_cols, pu_rows)

        I = phaseunwrapped.squeeze()
        A = np.fft.fft2(I)
        A1 = np.fft.fftshift(A)

        # Image size
        [M, N] = A.shape

        # filter size parameter
        R = 5

        X = np.arange(0, N, 1)
        Y = np.arange(0, M, 1)

        [X, Y] = np.meshgrid(X, Y)
        Cx = 0.5 * N
        Cy = 0.5 * M
        Lo = np.exp(-(((X - Cx)**2) + ((Y - Cy)**2)) / ((2 * R)**2))
        Hi = 1 - Lo
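        # Lo is a Gaussian low-pass mask centred at (Cx, Cy) with width set by R,
        # and Hi is its complement. Multiplying the shifted spectrum A1 by them
        # below splits the unwrapped phase into a smooth background (B1) and a
        # high-frequency component (B2) that is later searched for the needle tip.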

        J = A1 * Lo
        J1 = np.fft.ifftshift(J)
        B1 = np.fft.ifft2(J1)

        K = A1 * Hi
        K1 = np.fft.ifftshift(K)
        B2 = np.fft.ifft2(K1)
        B2 = np.real(B2)

        # Pad the mask border to suppress false positives near the edge
        border_size = 20
        top, bottom, left, right = [border_size] * 4
        mask_borderless = cv2.copyMakeBorder(mask, top, bottom, left, right,
                                             cv2.BORDER_CONSTANT, value=(0, 0, 0))

        kernel = np.ones((5, 5), np.uint8)
        mask_borderless = cv2.erode(mask_borderless, kernel, iterations=5)
        mask_borderless = ndimage.binary_fill_holes(mask_borderless).astype(
            np.uint8)
        rows, cols = mask_borderless.shape
        mask_borderless = mask_borderless[border_size:rows - border_size,
                                          border_size:cols - border_size]

        B2 = cv2.bitwise_and(B2, B2, mask=mask_borderless)

        H_elems = hessian_matrix(B2, sigma=5, order='rc')
        maxima_ridges, minima_ridges = hessian_matrix_eigvals(H_elems)

        hessian_det = maxima_ridges + minima_ridges
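        # The single strongest peak of the larger Hessian eigenvalue (a blob/ridge
        # response) is taken as the needle-tip location within this slice.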
        coordinate = peak_local_max(maxima_ridges,
                                    num_peaks=1,
                                    min_distance=20,
                                    exclude_border=True,
                                    indices=True)
        x2 = np.asscalar(coordinate[:, 1])
        y2 = np.asscalar(coordinate[:, 0])
        point = (x2, y2)
        coords = [x2, y2, slice_index]
        circle1 = plt.Circle(point, 2, color='red')

        # Find or create MRML transform node
        transformNode = None
        try:
            transformNode = slicer.util.getNode('TipTransform')
        except slicer.util.MRMLNodeNotFoundException as exc:
            transformNode = slicer.mrmlScene.AddNewNodeByClass(
                'vtkMRMLLinearTransformNode')
            transformNode.SetName("TipTransform")

        transformNode.SetAndObserveMatrixTransformToParent(magn_matrix)

        # Fiducial Creation
        fidNode1 = None
        try:
            fidNode1 = slicer.util.getNode('needle_tip')
        except slicer.util.MRMLNodeNotFoundException as exc:
            fidNode1 = slicer.mrmlScene.AddNewNodeByClass(
                "vtkMRMLMarkupsFiducialNode", "needle_tip")

        fidNode1.RemoveAllMarkups()

        #  fidNode1.CreateDefaultDisplayNodes()
        #  fidNode1.SetMaximumNumberOfControlPoints(1)

        fidNode1.AddFiducialFromArray(coords)
        fidNode1.SetAndObserveTransformNodeID(transformNode.GetID())

        ### TODO: don't delete the volume after use; add a checkpoint to update only one volume
        delete_wrapped = slicer.mrmlScene.GetFirstNodeByName('phase_cropped')
        slicer.mrmlScene.RemoveNode(delete_wrapped)
        delete_unwrapped = slicer.mrmlScene.GetFirstNodeByName(
            'unwrapped_phase')
        slicer.mrmlScene.RemoveNode(delete_unwrapped)

        return True
def calculate_likelihoods(D, X, C):
    if D:
        print(' ')
        print('calculate_likelihoods(X,C)')

    L = []

    for i in range(0, len(C)):

        c = C[i]

        xy = c["xy"]
        cv = c["covariance"]
        iv = c["inverse"]
        dt = c["determinant"]
        pi = c["pi"]
        color = c["color"]

        # The determinant of the cluster covariance is precomputed in c["determinant"].

        # Calculate normalizing factor.
        nf = 1
        try:
            sdt = sqrt(dt)
        except Exception as e:
            print(' ')
            print('sqrt(dt) failed.')
            print('e:', e)
            print('dt:', dt)
        try:
            nf = 1 / ((2 * pi) * (sdt))
        except Exception as e:
            print(' ')
            print('nf calculation failed.')
            print('e:', e)
            print('pi:', pi)
            print('dt:', dt)

        # Initialize likelihood list for cluster.
        l = []

        for j in range(0, len(X)):

            # Replace data points with (data points - centroid).
            x = X[j]

            # Deviation of the data point from the cluster centroid.
            a1 = x[0] - xy[0]
            b1 = x[1] - xy[1]
            x_row = np.array([a1, b1])
            x_row_1 = x_row.reshape([2, 1])

            # Multiply x by inverse.
            try:
                a2 = np.dot(iv, x_row)
            except Exception as e:
                print(' ')
                print('=== Error ===')
                print('np.dot(iv,x_row) threw an Exception.')
                print('e:', e)
                print('x_row:')
                print(x_row)
                print('iv:')
                print(iv)

            # Multiply by x.
            x_row_2 = x_row_1.reshape([1, 2])
            a4 = np.dot(a2, x_row)
            a5 = np.asscalar(a4)

            # Multiply by negative half.
            a6 = (-1 / 2) * a5

            # Calculate exponent.
            a7 = np.exp(a6)

            # Multiply by normalization factor.
            a8 = nf * a7

            # Add likelihood to list for cluster.
            l.append(a8)

            # Debugging
            if D:
                print(' ')
                print('debug')
                print(color, 'xy:', xy)
                print('x', x)
                print('x_row:', x_row)
                print('x_row_1:', x_row_1)
                print('x_row_2:', x_row_2)
                print('iv:')
                print(iv)
                print('(x_row_1)T(iv):')
                print(a2)
                print('(x_row_1)T(iv)(x_row_2)')
                print(a5)
                print('exp:', a6)
                print('term:', a7)
                print('likelihood:', color, a8)

        # Add list of likelihoods to the cluster.
        L.append(l)

    return L
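
# A minimal cross-check of the per-cluster likelihood above using SciPy. This is a
# hedged sketch, not part of the original routine; it assumes each cluster dict
# carries the same "xy" mean and "covariance" entries used above.
def calculate_likelihoods_scipy(X, C):
    from scipy.stats import multivariate_normal
    X = np.asarray(X, dtype=float)
    return [np.atleast_1d(
                multivariate_normal(mean=c["xy"], cov=c["covariance"]).pdf(X)
            ).tolist()
            for c in C]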
Exemplo n.º 41
0
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
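
# Note on np.asscalar, which appears throughout these examples: it was deprecated
# in NumPy 1.16 and removed in NumPy 1.23; x.item() is the drop-in replacement,
# e.g. the line above could equivalently be written as
#   tensor_proto.int64_val.extend([x.item() for x in proto_values])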
Exemplo n.º 42
0
    def param_search(self, IMG, zlim, theta_xlim, theta_ylim, xy_res, z_res):
        # variables
        nspacing = 5
        nbatch = nspacing**5
        n_each_param = 1
        ntrain = [5000, 500, 200, 200]
        ## load net
        print('loading models...')
        # go to training dir
        fsys.cd('D:/Dropbox/__Atlas__')
        params = {
            'load_file':
            'D:/__Atlas__/model_saves/model-regNETshallow_257x265_507000',
            'save_file': 'regNETshallow',
            'save_interval': 1000,
            'batch_size': 32,
            'lr': .0001,  # Learning rate
            'rms_decay': 0.9,  # RMS Prop decay
            'rms_eps': 1e-8,  # RMS Prop epsilon
            'width': 265,
            'height': 257,
            'numParam': 3,
            'train': True
        }
        netR = AtlasNet(params)
        params[
            'load_file'] = 'D:/__Atlas__/model_saves/model-Elastic2_257x265_1000'
        params['save_file'] = 'Elastic2'
        netE = ElasticNet(params)

        max_score = 0
        max_param = [0, 0, 0]
        # Loop over successively finer parameter resolutions
        for res in range(4):
            ## gen training batch
            print('\ngenerating training batch...')

            z_vals = []  #np.random.randint(zlim[0],zlim[1],nbatch)
            theta_x_vals = []
            theta_y_vals = []
            dxy_vals = []
            dz_vals = []
            zs = np.linspace(zlim[0], zlim[1], nspacing)
            txs = np.linspace(theta_xlim[0], theta_xlim[1], nspacing)
            tys = np.linspace(theta_ylim[0], theta_ylim[1], nspacing)
            dxys = np.linspace(xy_res[0], xy_res[1], nspacing)
            dzs = np.linspace(z_res[0], z_res[1], nspacing)

            for z in zs:
                for tx in txs:
                    for ty in tys:
                        for dxy in dxys:
                            for dz in dzs:
                                z_vals.append(z)
                                theta_x_vals.append(tx)
                                theta_y_vals.append(ty)
                                dxy_vals.append(dxy)
                                dz_vals.append(dz)

            big_batch = np.zeros(shape=(n_each_param * nbatch, params['width'],
                                        params['height'], 2),
                                 dtype=np.float32)

            pos = (0, n_each_param)
            for z, tx, ty, dxy, dz in tqdm.tqdm(
                    zip(z_vals, theta_x_vals, theta_y_vals, dxy_vals,
                        dz_vals)):
                big_batch[pos[0]:pos[1], :, :, :] = self.gen_batch(
                    IMG,
                    z,
                    tx,
                    ty,
                    dxy,
                    dz,
                    subsample=True,
                    n_subsample=n_each_param
                )  #[np.random.choice(range(len(IMG.images)),n_each_param),:,:,:]
                pos = (pos[0] + n_each_param, pos[1] + n_each_param)
            ## retrain network
            # plt.figure()
            # merged = np.dstack(
            #   (np.array(big_batch[0, :, :, 0]), np.array(big_batch[0, :, :, 1]), np.array(big_batch[0, :, :, 1])))
            # plt.imshow(merged)
            # plt.show()
            # exit()

            # retrain elastic after every rigid? or something?
            if ntrain[res] > 0:
                netR, netE = self.retrain_TF_Both(netR,
                                                  netE,
                                                  big_batch,
                                                  ntrain=ntrain[res],
                                                  nbatch=32)

            ## compute fits
            print('\ncomputing parameter scores...')
            score = np.zeros(shape=(nbatch, ))
            pos = 0
            for z, tx, ty, dxy, dz in tqdm.tqdm(
                    zip(z_vals, theta_x_vals, theta_y_vals, dxy_vals,
                        dz_vals)):
                batch = self.gen_batch(IMG, z, tx, ty, dxy, dz)
                #batch = self.gen_batch(IMG,z,tx,ty)
                tformed, xytheta, _ = netR.run_batch(batch)
                for i in range(tformed.shape[0]):
                    batch[i, :, :, 1] = np.squeeze(tformed[i, :, :])
                tformed, theta, cost_cc, cost = netE.run_batch(batch)
                # compute global cost function
                p_consistency = IMG.score_param_consistency(xytheta)
                score[pos] = .4 * np.mean(cost_cc) + .6 * p_consistency - cost
                pos += 1

            ## update parameter ranges
            plt.figure()
            n, bins, _ = plt.hist(score)
            plt.show()
            max_id = np.argmax(score)
            print('\nmax score:', np.max(score), 'pos:', z_vals[max_id],
                  theta_x_vals[max_id], theta_y_vals[max_id], dxy_vals[max_id],
                  dz_vals[max_id])

            if np.max(score) > max_score:
                max_score = np.max(score)
                max_param = [
                    z_vals[max_id], theta_x_vals[max_id], theta_y_vals[max_id],
                    dxy_vals[max_id], dz_vals[max_id]
                ]
                #update z
                z_span = np.asscalar(np.diff(zlim)) / 4.
                zlim = [z_vals[max_id] - z_span, z_vals[max_id] + z_span]
                # update theta x
                tx_span = np.asscalar(np.diff(theta_xlim)) / 4.
                theta_xlim = [
                    theta_x_vals[max_id] - tx_span,
                    theta_x_vals[max_id] + tx_span
                ]
                # update theta y
                ty_span = np.asscalar(np.diff(theta_ylim)) / 4.
                theta_ylim = [
                    theta_y_vals[max_id] - ty_span,
                    theta_y_vals[max_id] + ty_span
                ]

                # update dxy
                dxy_span = np.asscalar(np.diff(xy_res)) / 4.
                xy_res = [
                    dxy_vals[max_id] - dxy_span, dxy_vals[max_id] + dxy_span
                ]

                # update dz
                dz_span = np.asscalar(np.diff(z_res)) / 4.
                z_res = [dz_vals[max_id] - dz_span, dz_vals[max_id] + dz_span]

        ## close net
        tf.reset_default_graph()
        del netR, netE
        return max_score, max_param
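
# The range-refinement pattern used above, factored out as a hypothetical helper
# (not part of the original class): each accepted improvement shrinks a [lo, hi]
# search interval to half its width, centred on the best value found so far.
def refine_range(best_val, limits):
    span = np.asscalar(np.diff(limits)) / 4.
    return [best_val - span, best_val + span]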
Exemplo n.º 43
0
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])
Exemplo n.º 44
0
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.scomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])
Exemplo n.º 45
0
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])
Exemplo n.º 46
0
def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x[0]) for x in proto_values])
Exemplo n.º 47
0
# x = np.matrix([2,1]).transpose()
# H = np.matrix([[1,0],[0,1]])
# A = np.matrix([[4,0],[0,2]])
# g = np.matrix([4*(float(x[0]) - 1), 2*float(x[1])]).transpose()

x = np.matrix([1,-1]).transpose()
H = np.matrix([[2,1],[1,1]])
A = np.matrix([[2,0],[0,6]])
def calc_g(x):
    return np.matrix([2*np.asscalar(x[0]), 6*np.asscalar(x[1])]).transpose()

g = calc_g(x)
g_prev = g
d = -H * g
lam = np.asscalar(-(g.transpose()*d)/(d.transpose() * A * d))
x = x + lam * d

print('x[1]:')
print(x)

for i in range(2,3):
    print('===========i: {}==========='.format(i))
    print('g:')
    print(g)
    g = calc_g(x)
    p = lam * d
    q = -g_prev + g
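    # p is the step just taken and q the change in gradient: the two vectors a
    # quasi-Newton (DFP/BFGS-style) update of H would consume.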
    print("p,q: ")
    print(p)
    print(q)
Exemplo n.º 48
0
def ExtractBitsFromFloat16(x):
  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16))
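
# Quick sanity check of the bit pattern returned above (IEEE 754 half precision):
#   ExtractBitsFromFloat16(1.0)  == 0x3C00  # 15360
#   ExtractBitsFromFloat16(-2.0) == 0xC000  # 49152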
Exemplo n.º 49
0
def post_process_detection(locations, confidences):
    """ Takes Multibox predictions and their confidences scores, chooses the best one, and returns a stretched version.
    """
    # pred_locs: [x1,y1,x2,y2] in normalized coordinates
    pred_locs = np.clip(locations[0], 0., 1.)

    # First, we want to filter the proposals that are not in the square.
    filtered_bboxes = [[0., 0., 0., 0.]]
    filtered_confs = [0]
    best_conf = 0.
    for bbox, conf in zip(pred_locs, confidences[0]):
        if conf > .005 and conf > best_conf:
            best_conf = conf
            filtered_bboxes[0] = bbox
            filtered_confs[0] = conf
        """
        print(f"bbox: {bbox}, conf: {conf}")
        if bbox[0] < 0.: continue
        if bbox[1] < 0.: continue
        if bbox[2] > 1.: continue
        if bbox[3] > 1.: continue
        filtered_bboxes.append(bbox)
        filtered_confs.append(conf)
        """

    # Convert these from lists to numpy arrays.
    filtered_bboxes = np.array(filtered_bboxes)
    filtered_confs = np.array(filtered_confs)

    # Now, take the bounding box we are most confident in. If it's above 0.005 confidence, stretch and return it.
    # Otherwise, just return an empty list and 0 confidence.
    if filtered_bboxes.shape[0] != 0:
        sorted_idxs = np.argsort(filtered_confs.ravel())[::-1]
        filtered_bboxes = filtered_bboxes[sorted_idxs]
        filtered_confs = filtered_confs[sorted_idxs]
        bbox_to_keep = filtered_bboxes[0].ravel()
        conf_to_keep = float(np.asscalar(filtered_confs[0]))
        # are we enough confident?
        if conf_to_keep > .005:
            # Unpack the bbox values.
            xmin, ymin, xmax, ymax = bbox_to_keep

            # Make sure the bbox hasn't collapsed on itself.
            if abs(xmin - xmax) < 0.005:
                if xmin > 0.006:
                    xmin = xmin - 0.005
                else:
                    xmax = xmax + 0.005
            if abs(ymin - ymax) < 0.005:
                if ymin > 0.006:
                    ymin = ymin - 0.005
                else:
                    ymax = ymax + 0.005

            # Whether we use constant (vs width-based) stretch.
            useConstant = 0

            # Set the constant stretch amount.
            stretch_const = 0.06

            # Set the fractional stretch factor.
            stretch_factor = 0.10

            if useConstant:
                stretch_constx = stretch_const
                stretch_consty = stretch_const
            else:
                stretch_constx = (xmax -
                                  xmin) * stretch_factor  # fraction of the width
                stretch_consty = (ymax - ymin) * stretch_factor

            # Calculate the amount to stretch the x by.
            x_stretch = np.minimum(xmin, abs(1 - xmax))
            x_stretch = np.minimum(x_stretch, stretch_constx)

            # Calculate the amount to stretch the y by.
            y_stretch = np.minimum(ymin, abs(1 - ymax))
            y_stretch = np.minimum(y_stretch, stretch_consty)

            # Adjust the bounding box accordingly.
            xmin -= x_stretch
            xmax += x_stretch
            ymin -= y_stretch
            ymax += y_stretch
            return [xmin, ymin, xmax, ymax], conf_to_keep
        else:
            # No sufficiently confident proposal; return nothing.
            return [], 0.
    else:
        # No proposals, return nothing.
        return [], 0.
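
# Hypothetical usage sketch for post_process_detection, assuming one image with
# N normalized [x1, y1, x2, y2] boxes and matching confidences:
#   locations   = np.array([[[0.30, 0.30, 0.60, 0.70], [0.10, 0.10, 0.15, 0.12]]])
#   confidences = np.array([[0.92, 0.30]])
#   bbox, conf = post_process_detection(locations, confidences)
# The most confident box is kept and stretched by up to 10% of its width/height
# on each side, without crossing the image border.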
Exemplo n.º 50
0
def calc_g(x):
    return np.matrix([2*np.asscalar(x[0]), 6*np.asscalar(x[1])]).transpose()
Exemplo n.º 51
0
def RunEpoch(
    args,
    epoch,
    train_model,
    test_model,
    total_batch_size,
    num_shards,
    expname,
    explog,
):
    '''
    Run one epoch of the trainer.
    TODO: add checkpointing here.
    '''
    # TODO: add loading from checkpoint
    log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
    epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
    train_accuracy = 0
    train_loss = 0
    display_count = 20
    prefix = "gpu_{}".format(train_model._devices[0])
    for i in range(epoch_iters):
        # This timeout is required (temporarily) since CUDA-NCCL
        # operators might deadlock when synchronizing between GPUs.
        timeout = 600.0 if i == 0 else 60.0
        with timeout_guard.CompleteInTimeOrDie(timeout):
            t1 = time.time()
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = t2 - t1
        train_accuracy += workspace.FetchBlob(prefix + '/accuracy')
        train_loss += workspace.FetchBlob(prefix + '/loss')
        if (i + 1) % display_count == 0:  # or (i + 1) % epoch_iters == 0:
            fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
            log.info(
                fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
            train_fmt = "Training loss: {}, accuracy: {}"
            log.info(
                train_fmt.format(train_loss / display_count,
                                 train_accuracy / display_count))

            r_train_accuracy.append(train_accuracy / display_count)
            r_loss.append(train_loss / display_count)

            train_accuracy = 0
            train_loss = 0

            test_accuracy = 0
            ntests = 0
            for _ in range(0, 20):
                workspace.RunNet(test_model.net.Proto().name)
                for g in test_model._devices:
                    test_accuracy += np.asscalar(
                        workspace.FetchBlob("gpu_{}".format(g) + '/accuracy'))
                    ntests += 1
            test_accuracy /= ntests
            r_test_accuracy.append(test_accuracy)  #my
    # print(dir(data_parallel_model))

    # exit(0)
    num_images = epoch * epoch_iters * total_batch_size
    prefix = "gpu_{}".format(train_model._devices[0])
    accuracy = workspace.FetchBlob(prefix + '/accuracy')
    loss = workspace.FetchBlob(prefix + '/loss')
    learning_rate = workspace.FetchBlob('SgdOptimizer_0_lr_gpu0')
    test_accuracy = 0
    if (test_model is not None):
        # Run 100 iters of testing
        ntests = 0
        for _ in range(0, 100):
            workspace.RunNet(test_model.net.Proto().name)
            for g in test_model._devices:
                test_accuracy += np.asscalar(
                    workspace.FetchBlob("gpu_{}".format(g) + '/accuracy'))
                ntests += 1
        test_accuracy /= ntests
        r_test_accuracy.append(test_accuracy)  #my
    else:
        test_accuracy = (-1)
    test_fmt = "Testing accuracy: {}"
    log.info(test_fmt.format(test_accuracy))

    explog.log(input_count=num_images,
               batch_count=(i + epoch * epoch_iters),
               additional_values={
                   'accuracy': accuracy,
                   'loss': loss,
                   'learning_rate': learning_rate,
                   'epoch': epoch,
                   'test_accuracy': test_accuracy,
               })
    assert loss < 40, "Exploded gradients :("

    # TODO: add checkpointing
    return epoch + 1
Exemplo n.º 52
0
    def __call__(
        self,
        optProb,
        sens=None,
        sensStep=None,
        sensMode=None,
        storeHistory=None,
        hotStart=None,
        storeSens=True,
        timeLimit=None,
    ):
        """
        This is the main routine used to solve the optimization
        problem.

        Parameters
        ----------
        optProb : Optimization or Solution class instance
            This is the complete description of the optimization problem
            to be solved by the optimizer

        sens : str or python Function.
            Specify method to compute sensitivities. The default is
            None which will use SNOPT's own finite differences which
            are vastly superior to the pyOptSparse implementation. To
            explicitly use the pyOptSparse gradient class to do the
            derivatives with finite differences use 'FD'. 'sens'
            may also be 'CS' which will cause pyOptSparse to compute
            the derivatives using the complex step method. Finally,
            'sens' may be a python function handle which is expected
            to compute the sensitivities directly. For expensive
            function evaluations and/or problems with large numbers of
            design variables this is the preferred method.

        sensStep : float
            Set the step size to use for design variables. Defaults to
            1e-6 when sens is 'FD' and 1e-40j when sens is 'CS'.

        sensMode : str
            Use 'pgc' for parallel gradient computations. Only
            available with mpi4py and each objective evaluation is
            otherwise serial

        storeHistory : str
            File name of the history file into which the history of
            this optimization will be stored

        hotStart : str
            File name of the history file to "replay" for the
            optimization.  The optimization problem used to generate
            the history file specified in 'hotStart' must be
            **IDENTICAL** to the currently supplied 'optProb'. By
            identical we mean, **EVERY SINGLE PARAMETER MUST BE
            IDENTICAL**. As soon as the requested evaluation point
            from SNOPT does not match the history, function and
            gradient evaluations revert back to normal evaluations.

        storeSens : bool
            Flag specifying if sensitivities are to be stored in hist.
            This is necessary for hot-starting only.

        timeLimit : float
            Specify the maximum amount of time for optimizer to run.
            Must be in seconds. This can be useful on queue systems when
            you want an optimization to cleanly finish before the
            job runs out of time.
        """

        self.callCounter = 0
        self.storeSens = storeSens

        # Store the starting time if the keyword timeLimit is given:
        self.timeLimit = timeLimit
        self.startTime = time.time()

        if len(optProb.constraints) == 0:
            # If the user *actually* has an unconstrained problem,
            # snopt sort of chokes with that....it has to have at
            # least one constraint. So we will add one
            # automatically here:
            self.unconstrained = True
            optProb.dummyConstraint = True

        self.optProb = optProb
        self.optProb.finalizeDesignVariables()
        self.optProb.finalizeConstraints()

        self._setInitialCacheValues()
        self._setSens(sens, sensStep, sensMode)
        blx, bux, xs = self._assembleContinuousVariables()
        ff = self._assembleObjective()

        oneSided = False
        # Set the number of nonlinear constraints snopt *thinks* we have:
        if self.unconstrained:
            nnCon = 1
        else:
            indices, tmp1, tmp2, fact = self.optProb.getOrdering(
                ["ne", "ni"], oneSided=oneSided)
            nnCon = len(indices)
            self.optProb.jacIndices = indices
            self.optProb.fact = fact
            self.optProb.offset = np.zeros_like(fact)

            # Again, make SNOPT think we have a nonlinear constraint when all
            # our constraints are linear
            if nnCon == 0:
                nnCon = 1
                self.optProb.jacIndices = [0]
                self.optProb.fact = np.array([1.0])
                self.optProb.offset = np.zeros_like(self.optProb.fact)

        # We make a split here: If the rank is zero we setup the
        # problem and run SNOPT, otherwise we go to the waiting loop:
        if self.optProb.comm.rank == 0:

            # Determine the sparsity structure of the full Jacobian
            # -----------------------------------------------------

            # Gather dummy data and process Jacobian:
            gcon = {}
            for iCon in self.optProb.constraints:
                gcon[iCon] = self.optProb.constraints[iCon].jac

            jac = self.optProb.processConstraintJacobian(gcon)

            if self.optProb.nCon > 0:
                # We need to reorder this full Jacobian...so get ordering:
                indices, blc, buc, fact = self.optProb.getOrdering(
                    ["ne", "ni", "le", "li"], oneSided=oneSided)
                jac = extractRows(jac, indices)  # Does reordering
                scaleRows(jac, fact)  # Perform logical scaling
            else:
                blc = [-1e20]
                buc = [1e20]

            if self._snopt_jac_map_csr_to_csc is None:
                self._snopt_jac_map_csr_to_csc = mapToCSC(jac)

            # # CSC data is the csr data with the csc_indexing applied
            Acol = jac["csr"][IDATA][self._snopt_jac_map_csr_to_csc[IDATA]]
            # # CSC Row indices are just the row indices information from the map
            indA = self._snopt_jac_map_csr_to_csc[IROW] + 1
            # # CSC Column pointers are the column information from the map
            locA = self._snopt_jac_map_csr_to_csc[ICOL] + 1

            if self.optProb.nCon == 0:
                ncon = 1
            else:
                ncon = len(indices)

            # Initialize the Print and Summary files
            # --------------------------------------
            iPrint = self.getOption("iPrint")
            PrintFile = os.path.join(self.getOption("Print file"))
            if iPrint != 0 and iPrint != 6:
                ierror = snopt.openunit(iPrint, PrintFile, "replace",
                                        "sequential")
                if ierror != 0:
                    raise Error("Failed to properly open %s, ierror = %3d" %
                                (PrintFile, ierror))

            iSumm = self.getOption("iSumm")
            SummFile = os.path.join(self.getOption("Summary file"))
            if iSumm != 0 and iSumm != 6:
                ierror = snopt.openunit(iSumm, SummFile, "replace",
                                        "sequential")
                if ierror != 0:
                    raise Error("Failed to properly open %s, ierror = %3d" %
                                (SummFile, ierror))

            # Calculate the length of the work arrays
            # --------------------------------------
            nvar = self.optProb.ndvs
            lencw = 500
            leniw = 500 + 100 * (ncon + nvar)
            lenrw = 500 + 200 * (ncon + nvar)

            self.options["Total integer workspace"][1] = leniw
            self.options["Total real workspace"][1] = lenrw

            cw = np.empty((lencw, 8), "c")
            iw = np.zeros(leniw, np.intc)
            rw = np.zeros(lenrw, np.float)
            snopt.sninit(iPrint, iSumm, cw, iw, rw)

            # Memory allocation
            nnObj = nvar
            nnJac = nvar
            iObj = np.array(0, np.intc)
            neA = len(indA)
            neGcon = neA  # The nonlinear Jacobian and A are the same
            iExit = 0

            # Set the options into the SNOPT instance
            self._set_snopt_options(iPrint, iSumm, cw, iw, rw)

            mincw, miniw, minrw, cw = snopt.snmemb(iExit, ncon, nvar, neA,
                                                   neGcon, nnCon, nnJac, nnObj,
                                                   cw, iw, rw)

            if (minrw > lenrw) or (miniw > leniw) or (mincw > lencw):
                if mincw > lencw:
                    lencw = mincw
                    cw = np.empty((lencw, 8), "c")
                    cw[:] = " "
                if miniw > leniw:
                    leniw = miniw
                    iw = np.zeros(leniw, np.intc)
                if minrw > lenrw:
                    lenrw = minrw
                    rw = np.zeros(lenrw, np.float)

                snopt.sninit(iPrint, iSumm, cw, iw, rw)

                # snInit resets all the options to the defaults.
                # Set them again!
                self._set_snopt_options(iPrint, iSumm, cw, iw, rw)

            # Setup argument list values
            start = np.array(self.options["Start"][1])
            ObjAdd = np.array([0.0], np.float)
            ProbNm = np.array(self.optProb.name, "c")
            cdummy = -1111111  # this is a magic variable defined in SNOPT for undefined strings
            cw[51, :] = cdummy  # we set these to cdummy so that a placeholder is used in printout
            cw[52, :] = cdummy
            cw[53, :] = cdummy
            cw[54, :] = cdummy
            xs = np.concatenate((xs, np.zeros(ncon, np.float)))
            bl = np.concatenate((blx, blc))
            bu = np.concatenate((bux, buc))
            leniu = 2
            lenru = 3
            cu = np.array(["        "], "c")
            iu = np.zeros(leniu, np.intc)
            ru = np.zeros(lenru, np.float)
            hs = np.zeros(nvar + ncon, np.intc)

            Names = np.array(["        "], "c")
            pi = np.zeros(ncon, np.float)
            rc = np.zeros(nvar + ncon, np.float)
            inform = np.array([-1], np.intc)
            mincw = np.array([0], np.intc)
            miniw = np.array([0], np.intc)
            minrw = np.array([0], np.intc)
            nS = np.array([0], np.intc)
            ninf = np.array([0], np.intc)
            sinf = np.array([0.0], np.float)

            # Set history/hotstart
            self._setHistory(storeHistory, hotStart)

            # The snopt c interface
            timeA = time.time()
            # fmt: off
            snopt.snkerc(start, nnCon, nnObj, nnJac, iObj, ObjAdd, ProbNm,
                         self._userfg_wrap, snopt.snlog, snopt.snlog2,
                         snopt.sqlog, self._snstop, Acol, indA, locA, bl, bu,
                         Names, hs, xs, pi, rc, inform, mincw, miniw, minrw,
                         nS, ninf, sinf, ff, cu, iu, ru, cw, iw, rw)
            # fmt: on
            optTime = time.time() - timeA

            # Indicate solution finished
            self.optProb.comm.bcast(-1, root=0)

            if self.storeHistory:
                # Record the full state of variables, xs and hs such
                # that we could perform a warm start.
                self.hist.writeData("xs", xs)
                self.hist.writeData("hs", hs)
                self.metadata["endTime"] = datetime.datetime.now().strftime(
                    "%Y-%m-%d %H:%M:%S")
                self.metadata["optTime"] = optTime
                self.hist.writeData("metadata", self.metadata)
                self.hist.close()

            if iPrint != 0 and iPrint != 6:
                snopt.closeunit(self.options["iPrint"][1])
            if iSumm != 0 and iSumm != 6:
                snopt.closeunit(self.options["iSumm"][1])

            # Store Results
            inform = np.asscalar(inform)
            sol_inform = {}
            sol_inform["value"] = inform
            sol_inform["text"] = self.informs[inform]

            # Create the optimization solution
            sol = self._createSolution(optTime,
                                       sol_inform,
                                       ff,
                                       xs[:nvar],
                                       multipliers=pi)

        else:  # We are not on the root process so go into waiting loop:
            self._waitLoop()
            sol = None

        # Communicate the solution and return
        sol = self._communicateSolution(sol)

        return sol
Exemplo n.º 53
0
    def add_input_layer(self, input_shape):

        self.layers.append(self.sim.Population(
            np.asscalar(np.prod(input_shape[1:], dtype=np.int)),
            self.sim.SpikeSourcePoisson(), label='InputLayer'))
Exemplo n.º 54
0
                frames_per_buffer=CHUNK,
                input=True,
                output=False)

while stream.is_active():
    try:

        input = stream.read(CHUNK, exception_on_overflow=False)
        # Convert the raw buffer to an ndarray
        ndarray = np.frombuffer(input, dtype='int16')
        ''' To apply an FFT and go from the time domain to the frequency domain, add the single line below '''
        #f = np.fft.fft(ndarray)

        # Convert the ndarray to a list
        # Use native Python int values so they are easier to work with
        a = [np.asscalar(i) for i in ndarray]

        # As a test, look at what is stored at index 0
        onsei_data = a[0]
        # Constant for judging the sound as high-pitched
        Kouon = 3000
        # Low-pitched
        Teion = -1000

        if (onsei_data > Kouon):
            print('High pitch')
        elif (onsei_data < Teion):
            print('Low pitch')
        else:
            print('---------')
        ''' To also output the audio, set the stream's output to True and add the two lines below '''
Exemplo n.º 55
0
    def _hotspot(self, scan, nside, hemispheres, drange, pVal, logger):
        r"""Gather information about hottest spots in each hemisphere.

        """
        result = {}
        for key, dbound in hemispheres.iteritems():
            mask = ((scan["dec"] >= dbound[0]) & (scan["dec"] <= dbound[1]) &
                    (scan["dec"] > drange[0]) & (scan["dec"] < drange[1]))

            if not np.any(mask):
                logger.info("{0:s}: no events here.".format(key))
                continue

            if not np.any(scan[mask]["nsources"] > 0):
                logger.info("{0}: no over-fluctuation.".format(key))
                continue

            hotspot = np.sort(scan[mask], order=["pVal", "TS"])[-1]
            seed = {p: hotspot[p] for p in self.params}

            logger.info(
                "{0}: hot spot at ra = {1:.1f} deg, dec = {2:.1f} deg".format(
                    key, np.rad2deg(hotspot["ra"]),
                    np.rad2deg(hotspot["dec"])))

            logger.info("p-value = {0:.2f}, t = {1:.2f}".format(
                hotspot["pVal"], hotspot["TS"]))

            logger.info(",".join("{0} = {1:.2f}".format(p, seed[p])
                                 for p in seed))

            result[key] = dict(
                grid=dict(ra=hotspot["ra"],
                          dec=hotspot["dec"],
                          nside=nside,
                          pix=hp.ang2pix(nside, np.pi / 2 -
                                         hotspot["dec"], hotspot["ra"]),
                          TS=hotspot["TS"],
                          pVal=hotspot["pVal"]))

            result[key]["grid"].update(seed)

            fmin, xmin = self.fit_source_loc(hotspot["ra"],
                                             hotspot["dec"],
                                             size=hp.nside2resol(nside),
                                             seed=seed)

            pvalue = np.asscalar(pVal(fmin, np.sin(xmin["dec"])))

            logger.info(
                "Re-fit location: ra = {0:.1f} deg, dec = {1:.1f} deg".format(
                    np.rad2deg(xmin["ra"]), np.rad2deg(xmin["dec"])))

            logger.info("p-value = {0:.2f}, t = {1:.2f}".format(pvalue, fmin))

            logger.info(",".join("{0} = {1:.2f}".format(p, xmin[p])
                                 for p in seed))

            result[key]["fit"] = dict(TS=fmin, pVal=pvalue)
            result[key]["fit"].update(xmin)

            if result[key]["grid"]["pVal"] > result[key]["fit"]["pVal"]:
                result[key]["best"] = result[key]["grid"]
            else:
                result[key]["best"] = result[key]["fit"]

        return result
def train(
        context_encoder='baseline',
        corpus=None,
        # optimiser
        optimizer='adam',
        learning_rate=0.001,
        # model params
        embed_full_text_by='word',
        seq_maxlen=500,
        summary_maxlen=200,
        summary_context_length=10,
        internal_representation_dim=2000,
        attention_weight_max_roll=5,
        # training params
        l2_penalty_coeff=0.0,
        train_split=0.75,
        epochs=float('inf'),
        minibatch_size=20,
        seed=None,
        dropout_rate=None,
        # model load/save
        save_params='ass_params.pkl',
        save_params_every=5,
        validate_every=5,
        print_every=5,
        # summary generation on the validation set
        generate_summary=False,
        summary_search_beam_size=2):
    params, tparams, x_embedder, y_embedder = init_params(
        context_encoder=context_encoder,
        corpus=corpus,
        optimizer=optimizer,
        learning_rate=learning_rate,
        embed_full_text_by=embed_full_text_by,
        seq_maxlen=seq_maxlen,
        summary_maxlen=summary_maxlen,
        summary_context_length=summary_context_length,
        internal_representation_dim=internal_representation_dim,
        attention_weight_max_roll=attention_weight_max_roll,
        l2_penalty_coeff=l2_penalty_coeff,
        train_split=train_split,
        epochs=epochs,
        minibatch_size=minibatch_size,
        seed=seed,
        dropout_rate=dropout_rate,
        summary_search_beam_size=summary_search_beam_size)

    # minibatch of encoded texts
    # size batchsize-by-seq_maxlen
    x = T.cast(T.matrix(dtype=theano.config.floatX), 'int32')
    x_mask = T.matrix(dtype=theano.config.floatX)

    # summaries for the minibatch of texts
    y = T.cast(T.matrix(dtype=theano.config.floatX), 'int32')
    y_mask = T.matrix(dtype=theano.config.floatX)

    nll = training_model_output(x, y, x_mask, y_mask, params, tparams,
                                y_embedder)
    cost = nll.mean()

    tparams_to_optimise = {
        key: tparams[key]
        for key in tparams if (not key.endswith('emb')) and key != 'att_P_conv'
    }
    cost += params['l2_penalty_coeff'] * sum(
        [(p**2).sum() for k, p in tparams_to_optimise.items()])
    inputs = [x, y, x_mask, y_mask]

    # after all regularizers - compile the computational graph for cost
    print('Building f_cost... ', end='')
    f_cost = theano.function(inputs, cost, allow_input_downcast=True)
    print('Done')

    print('Computing gradient... ', end='')
    grads = T.grad(cost, list(tparams_to_optimise.values()))
    print('Done')

    # compile the optimizer, the actual computational graph is compiled here
    lr = T.scalar(name='lr')
    print('Building optimizers... ', end='')
    f_grad_shared, f_update = eval(optimizer)(lr, tparams_to_optimise, grads,
                                              inputs, cost)
    print('Done')

    print('Building summary candidate token generator... ', end='')
    f_best_candidates = tfunc_best_candidate_tokens(params, tparams)
    print('Done')

    print('Loading corpus... ', end='')
    x_train, x_test, y_train, y_test, \
    x_mask_train, x_mask_test, \
    y_mask_train, y_mask_test \
        = load_corpus(params, tparams, x_embedder, y_embedder)
    n_train_batches = int(x_train.shape[0] / params['minibatch_size'])
    n_test_batches = int(x_test.shape[0] / params['minibatch_size'])
    print('Done')

    print('Optimization')
    test_ids_to_summarize = sample(range(x_test.shape[0]), 5)
    for epoch in range(epochs):
        print('Epoch', epoch)

        # training of all minibatches
        params['phase'] = 'training'
        training_costs = []
        for batch_id in range(n_train_batches):
            if batch_id % print_every == 0:
                print('Batch {:} '.format(batch_id), end='')
            # compute cost, grads and copy grads to shared variables
            # use_noise.set_value(1.)
            current_batch = range(batch_id * params['minibatch_size'],
                                  (batch_id + 1) * params['minibatch_size'])
            cost = f_grad_shared(x_train[current_batch, :],
                                 y_train[current_batch, :],
                                 x_mask_train[current_batch, :],
                                 y_mask_train[current_batch, :])
            cost = np.asscalar(cost)
            training_costs.append(cost)
            # do the update on parameters
            f_update(learning_rate)
            if batch_id % print_every == 0:
                print('Cost {:.4f}'.format(cost))
        print('Epoch {:} mean training cost {:.4f}'.format(
            epoch, np.mean(training_costs)))

        # save the params
        if epoch % save_params_every == 0:
            print('Saving... ', end='')
            save_params_(params, tparams, save_params)
            print('Done')

        # validate
        # compute the metrics and generate summaries (if requested)
        params['phase'] = 'test'
        if epoch % validate_every == 0:
            print('Validating')
            validate_costs = []
            for batch_id in range(n_test_batches):
                if batch_id % print_every == 0:
                    print('Batch {:} '.format(batch_id), end='')
                current_batch = range(batch_id * params['minibatch_size'],
                                      (batch_id + 1) *
                                      params['minibatch_size'])
                validate_cost = f_cost(x_test[current_batch, :],
                                       y_test[current_batch, :],
                                       x_mask_test[current_batch, :],
                                       y_mask_test[current_batch, :])
                validate_cost = np.asscalar(validate_cost)
                validate_costs.append(validate_cost)
                if batch_id % print_every == 0:
                    print('Validation cost {:.4f}'.format(validate_cost))
            print('Epoch {:} mean validation cost {:.4f}'.format(
                epoch, np.mean(validate_costs)))

            if generate_summary:
                print('Generating summary')
                for i in test_ids_to_summarize:
                    summary_token_ids = summarize(x_test[i, :].flatten(),
                                                  x_mask_test[i, :].flatten(),
                                                  f_best_candidates, params,
                                                  tparams, y_embedder)
                    print('Sample :',
                          y_embedder.documentFromVector(summary_token_ids))
                    print('Truth :',
                          y_embedder.documentFromVector(y_test[i, :])[:20])
def error(y, x, w):
    return y - np.asscalar(np.inner(np.matrix(w), np.matrix(x.values)))
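
# Usage sketch for error() above, assuming x is a pandas Series of feature values
# and w a weight vector of the same length (values here are illustrative only):
#   x = pd.Series([1.0, 2.0]); w = [0.5, 0.25]
#   error(3.0, x, w)  # 3.0 - (0.5*1.0 + 0.25*2.0) = 2.0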
Exemplo n.º 58
0
    def _sensitivity(self, src_ra, src_dec, ts, beta, inj, n_iter, eps, trials,
                     logger, **kwargs):
        time = datetime.datetime.now()
        logger.info("t = {0:.2f}, beta = {1:.2%}".format(ts, beta))

        # If no events have been injected, do a quick estimation of the active
        # region by doing a few trials.
        if (len(trials) < 1 or not np.any(trials["n_inj"] > 0)
                or not np.any(trials["TS"][trials["n_inj"] > 0] > 2. * ts)):

            trials = self._active_region(src_ra, src_dec, ts, beta, inj,
                                         n_iter, trials, logger, **kwargs)

        # Calculate number of injected events needed so that beta percent of
        # the trials have a test statistic larger than ts. Fit closest point
        # to beta value; use existing scrambles to determine a seed for the
        # minimization; restrict fit to region where sampled before.
        stop = False
        niterations = 1
        while not stop:
            bounds = np.percentile(trials["n_inj"][trials["n_inj"] > 0],
                                   q=[self._ub_perc, 100. - self._ub_perc])

            if bounds[0] == 1:
                bounds[0] = np.count_nonzero(trials["n_inj"] == 1) /\
                    np.sum(trials["n_inj"] < 2)

            logger.info(
                "Estimate sensitivity in region {0:.1f} to {1:.1f}...".format(
                    *bounds))

            def residual(n):
                return np.log10((utils.poisson_percentile(
                    n, trials["n_inj"], trials["TS"], ts)[0] - beta)**2)

            seed = np.argmin([residual(n) for n in np.arange(0., bounds[-1])])

            xmin, fmin, success = scipy.optimize.fmin_l_bfgs_b(
                residual, [seed], bounds=[bounds], approx_grad=True)

            mu = np.asscalar(xmin)

            # Get the statistical uncertainty of the quantile.
            b, b_err = utils.poisson_percentile(mu, trials["n_inj"],
                                                trials["TS"], ts)

            logger.info(
                "Best estimate: mu = {0:.2f} ({1:.2%} +/- {2:.2%})".format(
                    mu, b, b_err))

            # If precision is high enough and fit did converge, the wanted
            # value is reached and we can stop the trial computation after this
            # iteration. Otherwise, do more trials with best estimate for mu.
            stop = (b_err < eps and mu > bounds[0] and mu < bounds[-1]
                    and np.fabs(b - beta) < eps)

            if not stop or niterations == 1:
                # To avoid a spiral with too few events, we want only half of
                # all events to be background scrambles after iterations.
                p_bckg = np.sum(trials["n_inj"] == 0) / len(trials)
                mu_min = np.log(1. / (1. - p_bckg))
                mu = np.amax([mu, mu_min])

                logger.info(
                    "Do {0:d} trials with mu = {1:.2f} events...".format(
                        n_iter, mu))

                trials = np.append(
                    trials,
                    self.do_trials(src_ra,
                                   src_dec,
                                   n_iter,
                                   mu=inj.sample(src_ra, mu),
                                   **kwargs))

            niterations += 1

        time = datetime.datetime.now() - time
        flux = inj.mu2flux(mu)

        logger.info("Finished after {0}.".format(time))
        logger.info("mu = {0:.2f}, flux = {1:.2e}".format(mu, flux))
        logger.info("trials = {0} ({1:.2f} / sec)".format(
            len(trials),
            len(trials) / time.seconds))

        return mu, flux, trials
Exemplo n.º 59
0
def accuracy(model):
    accuracy = []
    for device in model._devices:
        accuracy.append(
            np.asscalar(workspace.FetchBlob("gpu_{}/accuracy".format(device))))
    return np.average(accuracy)
Exemplo n.º 60
0
def train(traind, testd, output):
    train = pd.read_csv(traind, delimiter=' ', header=None)
    test = pd.read_csv(testd, delimiter=' ', header=None)
    X_train = train.values[:, :].copy()
    X_train = X_train[:, 0:X_train.shape[1] - 1].copy()
    Y_train = train.values[:, X_train.shape[1]].copy()
    X_test = test.values[:, :].copy()
    X_test = X_test[:, 0:X_test.shape[1] - 1].copy()
    X_train = X_train.astype(np.uint8)
    Y_train = Y_train.astype(np.uint8)
    X_test = X_test.astype(np.uint8)
    Y_ohe = np.zeros((Y_train.shape[0], 10))
    for i in range(Y_train.shape[0]):
        Y_ohe[i, Y_train[i]] = 1
    Train_arr = np.zeros((X_train.shape[0], 32, 32, 3))
    Test_arr = np.zeros((X_test.shape[0], 32, 32, 3))
    for i in range(X_train.shape[0]):
        Train_arr[i, :, :, :] = X_train[i, :].reshape(3, 32,
                                                      32).transpose(1, 2, 0)

    for i in range(X_test.shape[0]):
        Test_arr[i, :, :, :] = X_test[i, :].reshape(3, 32,
                                                    32).transpose(1, 2, 0)
    Train_arr = Train_arr.astype(np.uint8)
    Test_arr = Test_arr.astype(np.uint8)
    np.random.seed(1)
    ini = initializers.glorot_uniform()
    ini1 = initializers.glorot_uniform()
    ini2 = initializers.glorot_uniform()
    ini3 = initializers.glorot_uniform()
    model = Sequential()
    model.add(
        Conv2D(64,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer=ini,
               input_shape=(32, 32, 3)))
    model.add(MaxPooling2D(padding='same', strides=(1, 1)))
    model.add(
        Conv2D(128,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer=ini1))
    model.add(MaxPooling2D(padding='same', strides=(1, 1)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', kernel_initializer=ini2))
    model.add(Dense(256, activation='relu', kernel_initializer=ini3))
    model.add(BatchNormalization())
    model.add(Dense(10, activation='softmax'))
    opt = optimizers.Adam()
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    X_tr = Train_arr[0:50000, :] / 255
    X_te = Train_arr[40000:50000, :] / 255
    Y_tr = Y_ohe[0:50000, :]
    Y_te = Y_ohe[40000:50000, :]
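    # Note: the validation slice (rows 40000:50000) is a subset of the training
    # slice (rows 0:50000), so the reported validation accuracy is optimistic.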
    model.fit(X_tr,
              Y_tr,
              batch_size=500,
              validation_data=(X_te, Y_te),
              epochs=22)
    y = model.predict(Test_arr / 255)
    a = np.argmax(y, axis=1)
    for param in a:
        print(np.asscalar(param), file=open(output, "a"))