Example #1
    def Pattern_Generate(self):
        while True:
            file_Name_List = list(self.attribute_Dict.keys())
            random.shuffle(file_Name_List)

            index = 0
            while index < len(self.attribute_Dict):
                if len(self.pattern_Queue) >= self.max_Queue:
                    time.sleep(0.1)
                    continue

                image_Pattern_List = [
                    resize(
                        imread(os.path.join(self.image_Files_Dir, file_Name).replace("\\", "/")),
                        (self.image_Size, self.image_Size),
                        mode='constant') * 2 - 1
                    for file_Name in file_Name_List[index:index + self.batch_Size]]
                image_Pattern_List = [np.flip(image, axis=1) if random.random() > 0.5 else image for image in image_Pattern_List]
                attribute_Pattern_List = [self.attribute_Dict[file_Name] for file_Name in file_Name_List[index:index + self.batch_Size]]
                fake_Attribute_Pattern_List = np.copy(attribute_Pattern_List)
                random.shuffle(fake_Attribute_Pattern_List)

                image_Pattern = np.stack(image_Pattern_List, axis=0).astype(np.float32)
                original_Attribute_Pattern = np.stack(attribute_Pattern_List, axis=0).astype(np.float32)
                fake_Attribute_Pattern = np.stack(fake_Attribute_Pattern_List, axis=0).astype(np.float32)

                new_Feed_Dict = {
                    self.placeholder_Dict["Image"]: image_Pattern,
                    self.placeholder_Dict["Original_Attribute"]: original_Attribute_Pattern,
                    self.placeholder_Dict["Fake_Attribute"]: fake_Attribute_Pattern
                    }

                self.pattern_Queue.append(new_Feed_Dict)
                index += self.batch_Size
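Pattern_Generate loops forever, so it is meant to run on a background thread while training code pops feed dicts off pattern_Queue. A minimal self-contained sketch of that producer/consumer pattern (the deque and all names below are illustrative stand-ins, not from the source):

import threading
import time
from collections import deque

pattern_queue, max_queue = deque(), 8  # stand-ins for self.pattern_Queue / self.max_Queue

def producer():
    i = 0
    while True:
        if len(pattern_queue) >= max_queue:
            time.sleep(0.1)
            continue
        pattern_queue.append({"batch": i})  # a real feed dict would go here
        i += 1

threading.Thread(target=producer, daemon=True).start()
time.sleep(0.5)
print(len(pattern_queue))  # the queue fills to max_queue and then stalls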
    def _run_mst_decoding(batch_energy: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        heads = []
        head_tags = []
        for energy, length in zip(batch_energy.detach().cpu(), lengths):
            scores, tag_ids = energy.max(dim=0)
            # Although we need to include the root node so that the MST includes it,
            # we do not want any word to be the parent of the root node.
            # Here, we enforce this by setting the scores for all word -> ROOT
            # edges to be 0.
            scores[0, :] = 0
            # Decode the heads. Because we modify the scores to prevent
            # adding in word -> ROOT edges, we need to find the labels ourselves.
            instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)

            # Find the labels which correspond to the edges in the max spanning tree.
            instance_head_tags = []
            for child, parent in enumerate(instance_heads):
                instance_head_tags.append(tag_ids[parent, child].item())
            # We don't care what the head or tag is for the root token, but by default it's
            # not necessarily the same in the batched vs unbatched case, which is annoying.
            # Here we'll just set them to zero.
            instance_heads[0] = 0
            instance_head_tags[0] = 0
            heads.append(instance_heads)
            head_tags.append(instance_head_tags)
        return torch.from_numpy(numpy.stack(heads)), torch.from_numpy(numpy.stack(head_tags))
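For intuition, a torch-only sketch of the max step above, assuming a toy energy tensor of shape (num_labels, seq_len, seq_len): the max over the label axis yields both the edge scores fed to decode_mst and the tag_ids later looked up per (parent, child) edge.

import torch

energy = torch.rand(2, 3, 3)         # (num_labels, seq_len, seq_len)
scores, tag_ids = energy.max(dim=0)  # best score and best label id per edge
scores[0, :] = 0                     # forbid word -> ROOT edges, as above
print(scores.shape, tag_ids.shape)   # torch.Size([3, 3]) torch.Size([3, 3])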
 def testImplicitLargeDiag(self):
   mu = np.array([[1., 2, 3],
                  [11, 22, 33]])      # shape: [b, k] = [2, 3]
   u = np.array([[[1., 2],
                  [3, 4],
                  [5, 6]],
                 [[0.5, 0.75],
                  [1, 0.25],
                  [1.5, 1.25]]])      # shape: [b, k, r] = [2, 3, 2]
   m = np.array([[0.1, 0.2],
                 [0.4, 0.5]])         # shape: [b, r] = [2, 2]
   scale = np.stack([
       np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
                             np.transpose(u[0])),
       np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
                             np.transpose(u[1])),
   ])
   cov = np.stack([np.matmul(scale[0], scale[0].T),
                   np.matmul(scale[1], scale[1].T)])
   logging.vlog(2, "expected_cov:\n{}".format(cov))
   with self.test_session():
     mvn = ds.MultivariateNormalDiagPlusLowRank(
         loc=mu,
         scale_perturb_factor=u,
         scale_perturb_diag=m)
     self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
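As a sanity check on the construction above: scale = I + U diag(m) U^T is symmetric, so the covariance scale @ scale.T is just scale squared. A small numpy verification using the first batch element:

import numpy as np

u0 = np.array([[1., 2], [3, 4], [5, 6]])
m0 = np.array([0.1, 0.2])
scale0 = np.eye(3) + u0 @ np.diag(m0) @ u0.T
assert np.allclose(scale0 @ scale0.T, scale0 @ scale0)  # symmetric scale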
Example #4
def arrow3d(base, r1, r2, ort, l, h, m=13, pivot='tail'):
    # Build triangle strips for a 3D arrow: a cylindrical shaft of length l and
    # radius l*r1 along direction ort, capped by a cone of radius l*r2 and height h.
    x = np.array([1., 0., 0.])
    y = np.array([0., 1., 0.])
    th = np.linspace(0, np.pi*2, m).reshape(-1,1)
    ort = norm_vec(ort)
    if np.sum(ort * x) == 0:
       d1 = norm_vec(np.cross(ort, y))
    else:
       d1 = norm_vec(np.cross(ort, x))
    if pivot == 'tip':
       base = base - (l+h)*ort
    elif pivot == 'mid':
       base = base - (l+h)*ort/2.
    else:
       pass
    d2 = np.cross(ort, d1)
    p = base + l*r1* (d1*np.cos(th) + d2*np.sin(th))
    q = p + l*ort
    p2 = base + l*r2* (d1*np.cos(th) + d2*np.sin(th)) + l*ort
    p3 = base + (l+h)*ort 
    p3 = np.array([p3]*m).reshape(-1, 3)
    t1 = np.stack((p[:-1], q[:-1], p[1:]), axis=1)
    t2 = np.stack((p[1:], q[:-1], q[1:]), axis=1)
    t3 = np.stack((p2[:-1], p3[:-1], p2[1:]), axis=1)
    #t2 = np.dstack((p[1:], q[:-1], q[1:]))
    t1  = np.vstack((t1, t2, t3))
    return t1
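arrow3d relies on a norm_vec helper that is not shown; assuming it just normalizes its argument, a hypothetical call looks like this (the result is one triangle per row, suitable for a Poly3DCollection):

import numpy as np

def norm_vec(v):
    # Hypothetical stand-in for the helper arrow3d assumes.
    return v / np.linalg.norm(v)

tris = arrow3d(base=np.zeros(3), r1=0.05, r2=0.1,
               ort=np.array([0., 0., 1.]), l=1.0, h=0.3)
print(tris.shape)  # (36, 3, 3): 3 strips of m-1 = 12 triangles each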
Example #5
  def step(self, action):
    """Forward a batch of actions to the wrapped environments.

    Args:
      action: Batched action to apply to the environment.

    Raises:
      ValueError: Invalid actions.

    Returns:
      Batch of observations, rewards, and done flags.
    """
    actions = action
    for index, (env, action) in enumerate(zip(self._envs, actions)):
      if not env.action_space.contains(action):
        message = 'Invalid action at index {}: {}'
        raise ValueError(message.format(index, action))
    if self._blocking:
      transitions = [
          env.step(action)
          for env, action in zip(self._envs, actions)]
    else:
      transitions = [
          env.step(action, blocking=False)
          for env, action in zip(self._envs, actions)]
      transitions = [transition() for transition in transitions]
    observs, rewards, dones, infos = zip(*transitions)
    observ = np.stack(observs)
    reward = np.stack(rewards)
    done = np.stack(dones)
    info = tuple(infos)
    return observ, reward, done, info
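The zip(*transitions) idiom above transposes a list of per-environment (observ, reward, done, info) tuples into per-field sequences; in isolation:

import numpy as np

transitions = [(np.zeros(2), 1.0, False, {}), (np.ones(2), 0.5, True, {})]
observs, rewards, dones, infos = zip(*transitions)
print(np.stack(observs).shape, np.stack(rewards), np.stack(dones))
# (2, 2) [1.  0.5] [False  True]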
Example #6
 def reset(self):
     for remote in self.remotes:
         remote.send(('reset', None))
     results = [remote.recv() for remote in self.remotes]
     obs, infos = zip(*results)
     best_actions, action_masks = [np.stack(x) for x in get_best_actions_from_infos(infos)]
     return np.stack(obs), best_actions, action_masks
Example #7
def check_rnn_forward(layer, inputs, deterministic=True):
    if isinstance(inputs, mx.nd.NDArray):
        inputs.attach_grad()
    else:
        for x in inputs:
            x.attach_grad()
    layer.collect_params().initialize()
    with mx.autograd.record():
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()

    np_out = out.asnumpy()
    if isinstance(inputs, mx.nd.NDArray):
        np_dx = inputs.grad.asnumpy()
    else:
        np_dx = np.stack([x.grad.asnumpy() for x in inputs], axis=1)

    layer.hybridize()

    with mx.autograd.record():
        out = layer.unroll(3, inputs, merge_outputs=False)[0]
        mx.autograd.backward(out)
        out = layer.unroll(3, inputs, merge_outputs=True)[0]
        out.backward()

    if isinstance(inputs, mx.nd.NDArray):
        input_grads = inputs.grad.asnumpy()
    else:
        input_grads = np.stack([x.grad.asnumpy() for x in inputs], axis=1)

    if deterministic:
        mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)
        mx.test_utils.assert_almost_equal(np_dx, input_grads, rtol=1e-3, atol=1e-5)
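A hypothetical invocation, assuming an MXNet Gluon environment is available; the input uses 3 time steps to match the unroll(3, ...) calls above:

import mxnet as mx

check_rnn_forward(mx.gluon.rnn.LSTMCell(100, input_size=200),
                  mx.nd.ones((8, 3, 200)))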
def filters_bank(M, N, J, L=8):
    filters = {}
    filters['psi'] = []

    offset_unpad = 0
    for j in range(J):
        for theta in range(L):
            psi = {}
            psi['j'] = j
            psi['theta'] = theta
            psi_signal = morlet_2d(M, N, 0.8 * 2**j,
                                   (int(L - L / 2 - 1) - theta) * np.pi / L,
                                   3.0 / 4.0 * np.pi / 2**j, offset=offset_unpad)
            psi_signal_fourier = fft.fft2(psi_signal)
            for res in range(j + 1):
                psi_signal_fourier_res = crop_freq(psi_signal_fourier, res)
                psi[res] = torch.FloatTensor(np.stack((np.real(psi_signal_fourier_res), np.imag(psi_signal_fourier_res)), axis=2))
                # Normalization to avoid doing it with the FFT!
                psi[res].div_(M * N // 2**(2 * j))
            filters['psi'].append(psi)

    filters['phi'] = {}
    phi_signal = gabor_2d(M, N, 0.8 * 2**(J - 1), 0, 0, offset=offset_unpad)
    phi_signal_fourier = fft.fft2(phi_signal)
    filters['phi']['j'] = J
    for res in range(J):
        phi_signal_fourier_res = crop_freq(phi_signal_fourier, res)
        filters['phi'][res] = torch.FloatTensor(np.stack((np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)), axis=2))
        filters['phi'][res].div_(M * N // 2 ** (2 * J))

    return filters
Example #9
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows *
                              img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows *
                               img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in range(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in range(nb_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))
Example #10
    def _feed_dict(self, train_batch, is_training=True):

        pred_polys = train_batch['raw_polys'] * np.expand_dims(train_batch['masks'], axis=2)  # (seq,batch,2)
        pred_polys = np.transpose(pred_polys, [1, 0, 2])  # (batch,seq,2)

        pred_mask = np.transpose(train_batch['masks'], [1, 0])  # (batch_size,seq_len)
        cnn_feats = train_batch['cnn_feats']  # (batch_size, 28, 28, 128)

        cells_1 = np.stack([np.split(train_batch['hiddens_list'][-1][0], 2, axis=3)[0]], axis=1)

        cells_2 = np.stack([np.split(train_batch['hiddens_list'][-1][1], 2, axis=3)[0]], axis=1)

        pred_mask_imgs = self.draw_mask(28, 28, pred_polys, pred_mask)

        if is_training:
            raise NotImplementedError()

        r = {
            self._ph.cells_1: cells_1,
            self._ph.cells_2: cells_2,
            self._ph.pred_mask_imgs: pred_mask_imgs,
            self._ph.cnn_feats: cnn_feats,
            self._ph.predicted_mask: pred_mask,
            self._ph.pred_polys: pred_polys,
            self._ph.ious: self._zero_batch
        }

        return r
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """
    Split character data into training and validation sets, inputs and targets for each set.
    Arguments
    ---------
    chars: character array
    batch_size: Number of sequences per batch
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set
    Returns train_x, train_y, val_x, val_y
    """

    slice_size = batch_size * num_steps
    n_batches = int(len(chars) / slice_size)

    # Drop the last few characters to make only full batches
    x = chars[: n_batches * slice_size]
    y = chars[1: n_batches * slice_size + 1]

    # Split the data into batch_size slices, then stack them into a 2D matrix
    x = np.stack(np.split(x, batch_size))
    y = np.stack(np.split(y, batch_size))

    # Now x and y are arrays with dimensions batch_size x n_batches*num_steps

    # Split into training and validation sets, keep the first split_frac batches for training
    split_idx = int(n_batches * split_frac)
    train_x, train_y = x[:, :split_idx * num_steps], y[:, :split_idx * num_steps]
    val_x, val_y = x[:, split_idx * num_steps:], y[:, split_idx * num_steps:]

    return train_x, train_y, val_x, val_y
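A quick shape check for split_data with a dummy integer sequence (one extra element so the shifted targets line up):

import numpy as np

chars = np.arange(1001)
train_x, train_y, val_x, val_y = split_data(chars, batch_size=10,
                                            num_steps=5, split_frac=0.9)
print(train_x.shape, val_x.shape)  # (10, 90) (10, 10)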
Example #12
    def test_arrayize_vectorized_indexer(self):
        for i, j, k in itertools.product(self.indexers, repeat=3):
            vindex = indexing.VectorizedIndexer((i, j, k))
            vindex_array = indexing._arrayize_vectorized_indexer(
                vindex, self.data.shape)
            np.testing.assert_array_equal(
                self.data[vindex], self.data[vindex_array],)

        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((slice(None),)), shape=(5,))
        np.testing.assert_array_equal(actual.tuple, [np.arange(5)])

        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((np.arange(5),) * 3), shape=(8, 10, 12))
        expected = np.stack([np.arange(5)] * 3)
        np.testing.assert_array_equal(np.stack(actual.tuple), expected)

        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((np.arange(5), slice(None))),
            shape=(8, 10))
        a, b = actual.tuple
        np.testing.assert_array_equal(a, np.arange(5)[:, np.newaxis])
        np.testing.assert_array_equal(b, np.arange(10)[np.newaxis, :])

        actual = indexing._arrayize_vectorized_indexer(
            indexing.VectorizedIndexer((slice(None), np.arange(5))),
            shape=(8, 10))
        a, b = actual.tuple
        np.testing.assert_array_equal(a, np.arange(8)[np.newaxis, :])
        np.testing.assert_array_equal(b, np.arange(5)[:, np.newaxis])
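The broadcasting pattern these assertions exercise can be reproduced in plain numpy: a column vector of row indices paired with a row vector of column indices performs outer indexing.

import numpy as np

data = np.arange(80).reshape(8, 10)
rows = np.arange(5)[:, np.newaxis]   # shape (5, 1)
cols = np.arange(10)[np.newaxis, :]  # shape (1, 10)
print(data[rows, cols].shape)        # (5, 10), same as data[:5, :]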
Example #13
def converter(batch, device, max_caption_length=None):
    """Optional preprocessing of the batch before forward pass."""
    pad = max_caption_length is not None

    imgs = []
    captions = []
    for img, caption in batch:
        # Preprocess the caption by either fixing the length by padding (LSTM)
        # or by simply wrapping each caption in an ndarray (NStepLSTM)
        if pad:
            arr = np.full(max_caption_length, _ignore, dtype=np.int32)

            # Clip to max length if necessary
            arr[:len(caption)] = caption[:max_caption_length]
            caption = arr
        else:
            caption = to_device(device, np.asarray(caption, dtype=np.int32))

        imgs.append(img)
        captions.append(caption)

    if pad:
        captions = to_device(device, np.stack(captions))
    imgs = to_device(device, np.stack(imgs))

    return imgs, captions
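The padding branch depends on a module-level _ignore id that is not shown; assuming _ignore = -1, the clip-and-pad step behaves like this:

import numpy as np

_ignore = -1  # assumption: the module-level ignore id used by converter
caption = np.array([3, 7, 2], dtype=np.int32)
arr = np.full(6, _ignore, dtype=np.int32)   # max_caption_length = 6
arr[:len(caption)] = caption[:6]
print(arr)  # [ 3  7  2 -1 -1 -1]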
def corrcoef_raftscope(raftsfits, ROIrows, ROIcols, norm=True):
    """
    Correlation over one or more CCDs, calculating correlation along lines at each time index in ROIcols,
    then averaging.
    :param raftsfits: file list
    :param ROIrows: must be in the format: slice(start, stop)
    :param ROIcols: must be in the format: slice(start, stop)
    :param norm: if True, computes correlation coefficients; if not, returns covariances
    :return:
    """
    stackh = []

    for fl in raftsfits:
        h = pyfits.open(fl)
        for i in range(1, 17):
            stackh.append(h[i].data[ROIrows, ROIcols])
        h.close()
        del h
    stackh = np.stack(stackh)
    print(stackh.shape)

    a = []

    for numcol in range(ROIcols.stop - ROIcols.start):
        if norm:
            a.append(np.corrcoef(stackh[:, :, numcol]))
        else:
            a.append(np.cov(stackh[:, :, numcol]))
    a = np.stack(a)
    print(a.shape)

    return a.mean(axis=0)
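The correlate-per-column-then-average pattern at the end is easy to exercise with random data (16 channels, as stacked from one raft file above):

import numpy as np

stackh = np.random.rand(16, 50, 30)   # (channels, rows, columns)
a = np.stack([np.corrcoef(stackh[:, :, c]) for c in range(30)])
print(a.mean(axis=0).shape)           # (16, 16) correlation matrix, averaged over columns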
Example #15
def formatPeaksArbitraryPSF(peaks, peaks_type):
    """
    Input peaks array formatter for arbitrary PSFs.

    Based on peaks_type, create a properly formatted ndarray to pass
    to the C library. This is primarily for internal use by newPeaks().
    """
    # These come from the finder, or the unit test code, create peaks
    # as (N,3) with columns x, y, z.
    #
    if (peaks_type == "testing") or (peaks_type == "finder"):
        c_peaks = numpy.stack((peaks["x"],
                               peaks["y"],
                               peaks["z"]), axis = 1)

    # These come from pre-specified peak fitting locations, create peaks
    # as (N,5) with columns x, y, z, background, height.
    #
    elif (peaks_type == "text") or (peaks_type == "hdf5"):
        c_peaks = numpy.stack((peaks["x"],
                               peaks["y"],
                               peaks["z"],
                               peaks["background"],
                               peaks["height"]), axis = 1)
    else:
        raise MultiFitterException("Unknown peaks type '" + peaks_type + "'")

    return numpy.ascontiguousarray(c_peaks, dtype = numpy.float64)
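A usage sketch with a plain dict of arrays standing in for the structured peaks input (field names follow the function above):

import numpy

peaks = {"x": numpy.array([1.0, 2.0]),
         "y": numpy.array([3.0, 4.0]),
         "z": numpy.array([0.0, 0.5])}
c_peaks = formatPeaksArbitraryPSF(peaks, "finder")
print(c_peaks.shape, c_peaks.dtype)  # (2, 3) float64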
Example #16
    def compute_null_stats(self, elec_pair_phase_diff, recalled, elec_pair_stats):

        res = Parallel(n_jobs=12, verbose=5)(delayed(calc_circ_stats)(elec_pair_phase_diff, recalled, True)
                                             for _ in range(self.n_perms))

        # for the rayleigh z and the resultant vector length, compute the actual difference between good and bad
        # memory at each timepoint. Then compute a null distribution from shuffled data. Then compute the rank of the
        # real data compared to the shuffled at each timepoint. Convert rank to z-score and return
        null_elec_pair_zs_rec = np.stack([x['elec_pair_z_rec'] for x in res], 0)
        null_elec_pair_zs_nrec = np.stack([x['elec_pair_z_nrec'] for x in res], 0)
        null_delta_mem_zs = null_elec_pair_zs_rec - null_elec_pair_zs_nrec
        real_delta_mem_zs = elec_pair_stats['elec_pair_z_rec'] - elec_pair_stats['elec_pair_z_nrec']
        delta_mem_zs_rank = np.mean(real_delta_mem_zs > null_delta_mem_zs, axis=0)
        delta_mem_zs_rank[delta_mem_zs_rank == 0] += 1/self.n_perms
        delta_mem_zs_rank[delta_mem_zs_rank == 1] -= 1 / self.n_perms

        null_elec_pair_rvls_rec = np.stack([x['elec_pair_rvl_rec'] for x in res], 0)
        null_elec_pair_rvls_nrec = np.stack([x['elec_pair_rvl_nrec'] for x in res], 0)
        null_delta_mem_rvls = null_elec_pair_rvls_rec - null_elec_pair_rvls_nrec
        real_delta_mem_rvls = elec_pair_stats['elec_pair_rvl_rec'] - elec_pair_stats['elec_pair_rvl_nrec']
        delta_mem_rvls_rank = np.mean(real_delta_mem_rvls > null_delta_mem_rvls, axis=0)
        delta_mem_rvls_rank[delta_mem_rvls_rank == 0] += 1/self.n_perms
        delta_mem_rvls_rank[delta_mem_rvls_rank == 1] -= 1 / self.n_perms

        return norm.ppf(delta_mem_zs_rank), norm.ppf(delta_mem_rvls_rank)
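The rank-to-z conversion at the end clips ranks away from 0 and 1, which would otherwise map to infinite z-scores, before applying the normal quantile function; in isolation:

import numpy as np
from scipy.stats import norm

n_perms = 100
rank = np.array([0.0, 0.5, 1.0])
rank[rank == 0] += 1 / n_perms
rank[rank == 1] -= 1 / n_perms
print(norm.ppf(rank))  # approximately [-2.326  0.     2.326]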
Example #17
    def calc_score(self):
        cardtype_names = np.array(
            ['highcard', 'pair', 'twopair', 'threeofakind', 'straight', 'flush', 'fullhouse', 'fourofakind',
             'straightflush'])
        self.cardtype_multiplier = np.array(
            [self.highcard_multiplier, self.pair_multiplier, self.twopair_multiplier, self.threeofakind_multiplier,
             self.straight_multiplier, self.flush_multiplier, self.fullhouse_multiplier, self.fourofakind_multiplier,
             self.straighflush_multiplier])
        self.detected_types = np.stack((self.highcard, self.pair, self.twopair, self.threeofakind,
                                        self.straight, self.flush, self.fullhouse, self.fourofakind,
                                        self.straightflush), axis=0)
        self.hand_vals = np.stack((self.highCardsVal, self.pairScore, self.twoPairScore, self.threeScore,self.straightScore,
                                   self.flushScore,self.fullhouseScore,self.fourofakindScore,self.straightflush_score),axis = 0)

        detected_types = self.detected_types * 1
        self.active_multiplier = self.cardtype_multiplier[:,None,None] * detected_types * self.hand_vals
        self.ordered_multiplier = np.sort(self.active_multiplier,axis = 0)[::-1,:,:]
        highestVals = np.argmax(self.ordered_multiplier[0,:,:], axis=1)
        Winners = (self.ordered_multiplier[0, ::] == np.amax(self.ordered_multiplier[0, :, :], axis=1)[:, None])
        MyWinnerMask = np.zeros(self.player_amount, dtype=int)
        MyWinnerMask[0] = 1
        MyWinnArray = (Winners == MyWinnerMask).all(1)
        MyWins = np.sum(MyWinnArray,axis = 0)



        # print('cardtype_multiplier \n {}'.format(self.cardtype_multiplier))
        # print('detected_types \n {}'.format(detected_types))
        # print('hand_vals \n {}'.format(self.hand_vals))
        # print('active_multiplier \n {}'.format(self.active_multiplier))
        # print('ordered_multiplier \n {}'.format(self.ordered_multiplier))
        # print('highest vals \n {}'.format(highestVals))
        # print('My Wins \n {}'.format(MyWins))

        return MyWins / self.iterations
Example #18
    def __init__(self, stripeindex=None):
        if stripeindex is None:
            BCfile = MISTFILE_default
        else:
            BCfile = '/n/regal/conroy_lab/pac/MISTFILES/MIST_full_{0}.h5'.format(stripeindex)

        # read in MIST hdf5 table
        MISTh5 = h5py.File(BCfile, 'r')
        # determine the BC datasets
        BCTableList = [x for x in MISTh5.keys() if x[:3] == 'BC_']
        # read in each BC dataset and pull the photometric information
        for BCT in BCTableList:
            BCTABLE = Table(np.array(MISTh5[BCT]))
            if BCT == BCTableList[0]:
                BC = BCTABLE.copy()
            else:
                BCTABLE.remove_columns(['Teff', 'logg', '[Fe/H]', 'Av', 'Rv'])
                BC = hstack([BC, BCTABLE])

        BC_AV0 = BC[BC['Av'] == 0.0]

        self.bands = [x for x in BC.keys()
                      if x not in ('Teff', 'logg', '[Fe/H]', 'Av', 'Rv')]

        self.redintr = LinearNDInterpolator(
            (BC['Teff'], BC['logg'], BC['[Fe/H]'], BC['Av']),
            np.stack([BC[bb] for bb in self.bands], axis=1),
            rescale=True
            )
        self.redintr_0 = LinearNDInterpolator(
            (BC_AV0['Teff'], BC_AV0['logg'], BC_AV0['[Fe/H]']),
            np.stack([BC_AV0[bb] for bb in self.bands], axis=1),
            rescale=True
            )
Example #19
def test_gaussian_filter_2d_variable_sigma(astronaut):
    astronaut = astronaut[::2, ::2]
    astronaut_stacked = np.stack([astronaut,
                                  astronaut[:, ::-1],
                                  astronaut[::-1, :],
                                  astronaut[::-1, ::-1]])
    bs = len(astronaut_stacked)
    img = theano.shared(astronaut_stacked[:, np.newaxis])
    sigmas = np.array([3, 1, 2, 0.5], dtype=np.float32)
    sigmas_shared = theano.shared(sigmas)
    theano_blur = gaussian_filter_2d_variable_sigma(img, sigmas_shared,
                                                    border_mode='zero')
    blur = theano_blur.eval()
    assert blur.shape == (bs, 1, 64, 64)
    blur = blur.reshape(bs, 64, 64)
    r, c = 4, 2
    for i, (sigma, astro) in enumerate(zip(sigmas, astronaut_stacked)):
        expected = skimage.filters.gaussian_filter(astro,
                                                   float(sigma), mode='constant')

        np.testing.assert_allclose(blur[i], expected, rtol=0.01, atol=0.02)

        plt.subplot(r, c, 2*i+1)
        plt.imshow(blur[i], cmap='gray')
        plt.subplot(r, c, 2*(i+1))
        plt.imshow(expected, cmap='gray')

    plt_save_and_maybe_show("test_gaussian_blur_2d_variable_sigmas.png")
Example #20
def interpolate_using_griddata(trainingPointsU,trainingPointsV,valuesX,valuesY,valuesZ,unknownPointsU,unknownPointsV,taxels,centersAndNormals,taxel_offset):
    ret = interpolation_result()

    trainingPoints = np.stack([np.array(trainingPointsU), np.array(trainingPointsV)], 1)
    unknownPoints = np.stack([np.array(unknownPointsU), np.array(unknownPointsV)], 1)

    ret.unknownX = scipy.interpolate.griddata(np.array(trainingPoints), np.array(valuesX), np.array(unknownPoints), method="cubic")
    ret.unknownY = scipy.interpolate.griddata(np.array(trainingPoints), np.array(valuesY), np.array(unknownPoints), method="cubic")
    ret.unknownZ = scipy.interpolate.griddata(np.array(trainingPoints), np.array(valuesZ), np.array(unknownPoints), method="cubic")
    ret.normX = np.zeros(ret.unknownX.shape)
    ret.normY = np.zeros(ret.unknownY.shape)
    ret.normZ = np.zeros(ret.unknownZ.shape)

    # the taxel outside the 2D convex hull of the triangle center, use the triangle center
    # TODO use an interpolation method
    for taxelIndex in taxels:
        taxel = taxels[taxelIndex]
        if np.isnan(ret.unknownX[taxelIndex-taxel_offset]) and taxel["type"] != "dummy":
            ret.unknownX[taxelIndex-taxel_offset] = centersAndNormals['centers'][taxel["triangleNumber"]][0]
            ret.unknownY[taxelIndex-taxel_offset] = centersAndNormals['centers'][taxel["triangleNumber"]][1]
            ret.unknownZ[taxelIndex-taxel_offset] = centersAndNormals['centers'][taxel["triangleNumber"]][2]

        # normals TODO compute normals, for now just put the normal of the center of the triangle
        if( not(taxel["type"] is "dummy") ):
            ret.normX[taxelIndex-taxel_offset] = centersAndNormals['normals'][taxel["triangleNumber"]][0]
            ret.normY[taxelIndex-taxel_offset] = centersAndNormals['normals'][taxel["triangleNumber"]][1]
            ret.normZ[taxelIndex-taxel_offset] = centersAndNormals['normals'][taxel["triangleNumber"]][2]

    return ret
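scipy's griddata with method="cubic" returns NaN for query points outside the convex hull of the training points, which is exactly the case the fallback loop above patches up; a minimal demonstration:

import numpy as np
import scipy.interpolate

pts = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
vals = np.array([0., 1., 1., 2.])
query = np.array([[0.5, 0.5], [2.0, 2.0]])  # second point lies outside the hull
print(scipy.interpolate.griddata(pts, vals, query, method="cubic"))
# [ 1. nan]  -- values outside the hull come back as NaN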
def ex_descriptor(omega, f, xia, n_lowest, c_ao, s, tdm, tqm, nocc, nvirt, mol, config):
    """
    ADD DOCUMENTATION
    """
    # Reshape xia
    xia_I = xia.reshape(nocc, nvirt, nocc*nvirt)

    # Transform the transition density matrix into AO basis
    d0I_ao = np.stack([
        np.linalg.multi_dot(
            [c_ao[:, :nocc], xia_I[:, :, i], c_ao[:, nocc:].T])
        for i in range(n_lowest)])

    # Compute omega in exciton analysis for the lowest n excitations
    om = get_omega(d0I_ao, s, n_lowest)

    # Compute the distribution of positions for the hole and electron
    xh, yh, zh = get_exciton_positions(d0I_ao, s, tdm, n_lowest, 'hole')
    xe, ye, ze = get_exciton_positions(d0I_ao, s, tdm, n_lowest, 'electron')

    # Compute the distribution of the square of position for the hole and electron
    x2h, y2h, z2h = get_exciton_positions(d0I_ao, s, tqm, n_lowest, 'hole')
    x2e, y2e, z2e = get_exciton_positions(d0I_ao, s, tqm, n_lowest, 'electron')

    # Compute the distribution of both hole and electron positions
    xhxe, yhye, zhze = get_exciton_positions(d0I_ao, s, tdm, n_lowest, 'both')

    # Compute Descriptors

    # Compute exciton size:
    d_exc = np.sqrt(
        ((x2h - 2 * xhxe + x2e) + (y2h - 2 * yhye + y2e) + (z2h - 2 * zhze + z2e)) / om)

    # Compute centroid electron_hole distance
    d_he = np.abs(((xe - xh) + (ye - yh) + (ze - zh)) / om)

    # Compute hole and electron size
    sigma_h = np.sqrt(
        (x2h / om - (xh / om) ** 2) + (y2h / om - (yh / om) ** 2) + (z2h / om - (zh / om) ** 2))
    sigma_e = np.sqrt(
        (x2e / om - (xe / om) ** 2) + (y2e / om - (ye / om) ** 2) + (z2e / om - (ze / om) ** 2))

    # Compute Pearson coefficients
    cov = (xhxe - xh * xe) + (yhye - yh * ye) + (zhze - zh * ze)
    r_eh = cov / (sigma_h * sigma_e)

    # Compute approximate d_exc and binding energy
    omega_ab = get_omega_ab(d0I_ao, s, n_lowest, mol, config)
    r_ab = get_r_ab(mol)

    d_exc_apprx = np.stack([
        np.sqrt(np.sum(omega_ab[i, :, :] * (r_ab ** 2)) / om[i])
        for i in range(n_lowest)])
    # binding energy approximated
    xs = np.stack([omega_ab[i, :, :] / r_ab for i in range(n_lowest)])
    xs[np.isinf(xs)] = 0
    binding_en_apprx = np.stack([np.sum(xs[i, :, :]) / om[i] for i in range(n_lowest)])

    descriptors = write_output_descriptors(
        d_exc, d_exc_apprx, d_he, sigma_h, sigma_e, r_eh, binding_en_apprx, n_lowest, omega, f)

    return descriptors
Example #22
    def add_polygons(self, polygons, y_offset, x_offset, dimensions):
        '''Creates a label image representation of segmented objects based
        on global map coordinates of object contours.

        Parameters
        ----------
        polygons: List[List[Tuple[Union[int, shapely.geometry.polygon.Polygon]]]]
            label and polygon geometry for segmented objects at each z-plane
            and time point
        y_offset: int
            global vertical offset that needs to be subtracted from
            y-coordinates
        x_offset: int
            global horizontal offset that needs to be subtracted from
            x-coordinates
        dimensions: Tuple[int]
            *y*, *x* dimensions of image pixel planes

        Returns
        -------
        numpy.ndarray[numpy.int32]
            label image
        '''
        zstacks = list()
        for poly in polygons:
            zplanes = list()
            for p in poly:
                image = SegmentationImage.create_from_polygons(
                    p, y_offset, x_offset, dimensions
                )
                zplanes.append(image.array)
            array = np.stack(zplanes, axis=-1)
            zstacks.append(array)
        self.value = np.stack(zstacks, axis=-1)
        return self.value
def get_filters(R, filter_size, P=None, n_rings=None):
   """Perform single-frequency DFT on each ring of a polar-resampled patch"""
   k = filter_size
   filters = {}
   N = n_samples(k)
   from scipy.linalg import dft
   for m, r in R.items():
      rsh = r.shape
      # Get the basis matrices
      weights = get_interpolation_weights(k, m, n_rings=n_rings)
      DFT = dft(N)[m,:]
      LPF = np.dot(DFT, weights).T
      cosine = np.real(LPF).astype(np.float32)
      sine = np.imag(LPF).astype(np.float32)
      # Project taps onto the rotational basis
      r = np.reshape(r, np.stack([rsh[0], rsh[1]*rsh[2]]))
      ucos = np.reshape(np.dot(cosine, r), np.stack([k, k, rsh[1], rsh[2]]))
      usin = np.reshape(np.dot(sine, r), np.stack([k, k, rsh[1], rsh[2]]))
      if P is not None:
         # Rotate basis matrices
         ucos_ = np.cos(P[m])*ucos + np.sin(P[m])*usin
         usin = -np.sin(P[m])*ucos + np.cos(P[m])*usin
         ucos = ucos_
      filters[m] = (ucos, usin)
   return filters
Example #24
 def translist_to_traj(tlist):
     obs_T_Do = np.stack([trans[0] for trans in tlist]);  assert obs_T_Do.shape == (len(tlist), self.obs_space.storage_size)
     obsfeat_T_Df = np.stack([trans[1] for trans in tlist]); assert obsfeat_T_Df.shape[0] == len(tlist)
     adist_T_Pa = np.stack([trans[2] for trans in tlist]); assert adist_T_Pa.ndim == 2 and adist_T_Pa.shape[0] == len(tlist)
     a_T_Da = np.stack([trans[3] for trans in tlist]); assert a_T_Da.shape == (len(tlist), self.action_space.storage_size)
     r_T = np.stack([trans[4] for trans in tlist]); assert r_T.shape == (len(tlist),)
     return Trajectory(obs_T_Do, obsfeat_T_Df, adist_T_Pa, a_T_Da, r_T)
def get_non_missing(ids, x, y, real_codes):
    """
    Takes lists of the data and removes missing data!
    :param ids:
    :param x:
    :param y:
    :param real_codes:
    :return:
    """

    dataset = list(zip(ids, x, y, real_codes))
    dataset = np.array(dataset, dtype=object)
    non_miss = dataset[~(dataset[:,3] == '""')]

    id_clean = non_miss[:,0].tolist()           ##Takes first column of non_missing matrix to writes it to a list
    text_clean = non_miss[:,1]
    code_clean = non_miss[:,2]
    real_codes_clean = non_miss[:,3].tolist()
    real_codes_clean = [float(i) for i in real_codes_clean]    ##Turns real_codes into floats for memory efficiency
    real_codes_clean = np.array(real_codes_clean)

    text_clean = np.stack(text_clean, axis=0)   ## Makes everything a 2D array instead of array of arrays...
    code_clean = np.stack(code_clean, axis=0)

    return [id_clean, text_clean, code_clean, real_codes_clean]
Example #26
 def step(self, action):
     # x = np.argmax(action[:image_width])
     # r = (np.argmax(action[image_width:]) - 1)
     # pic = self.canvas[:, :, 0]
     # if (r != -1):
     #    r = 2 ** r
     #    for i in range(image_width):
     #        if(np.sum(pic[i, x : x + r + 1])):
     #            self.draw(x, i, r)
     #            break
     x = (action[:image_width] + 1) / 2.
     y = (action[image_width:] + 1) / 2.
     grey = x * y.reshape(image_width, 1)
     grey = grey.reshape((image_width, image_width, 1))
     grey = (grey * (255, 255, 255) / 4).astype('uint8')
     grey = np.minimum(grey, self.canvas)
     self.canvas -= grey
     diff = self.diff()
     reward = (self.lastdiff - diff) / self.rewardscale # reward is positive if the diff decreased
     self.lastdiff = diff
     self.stepnum += 1
     ob = self.observation()
     self.canvas = np.stack(np.rot90(self.canvas))
     self.target = np.stack(np.rot90(self.target))
     self.time += 1. / max_step        
     return ob, reward, (self.stepnum >= max_step), None # o,r,d,i
Example #27
 def read(self, input_path):
     '''
     Reads in the data from input files
     '''
     self.lr_inputs = None
     self.sr_outputs = None
     print(input_path)
     filenames = glob.glob(input_path + '*')
     #TODO: remove assertion
     assert len(filenames) > 0
     random.shuffle(filenames)
     filenames = filenames[0:150]
     print('Length: ' + str(len(filenames)))
     filenames.sort()
     outputs = []
     inputs = []
     for filename in filenames:
         output_img = cv2.imread(filename)
         # Asserts the image is read correctly and not empty
         assert output_img.shape[0] > 0
         assert output_img.shape[1] > 0
         #TODO: read in actual depth
         output_depth = np.random.random((output_img.shape[0], output_img.shape[1], 1))
         #print(type(output_img))
         output_img = np.concatenate((output_img, output_depth), 2)
         #print(type(output_img))
         outputs.append(output_img)
         input_img = compute_lr_input(
                                      output_img, downsampling_factor_x=2, 
                                      downsampling_factor_y=2, blur_sigma=1.6, noise_sigma=0.03)
         inputs.append(input_img)
     self.sr_outputs = np.stack(outputs, axis=0)
     self.lr_inputs = np.stack(inputs, axis=0)
Example #28
    def _read(self, key):
        ifnone = lambda a, b: b if a is None else a

        y = key[1]
        x = key[2]
        if isinstance(x, slice):
            xstart = ifnone(x.start,0)
            xstop = ifnone(x.stop,self.raster_size[0])
            xstep = xstop - xstart
        else:
            raise TypeError("Loc style access elements must be slices, e.g., [:] or [10:100]")
        if isinstance(y, slice):
            ystart = ifnone(y.start, 0)
            ystop = ifnone(y.stop, self.raster_size[1])
            ystep = ystop - ystart
        else:
            raise TypeError("Loc style access elements must be slices, e.g., [:] or [10:100]")

        pixels = (xstart, ystart, xstep, ystep)
        if isinstance(key[0], (int, np.integer)):
            return self.read_array(band=int(key[0]+1), pixels=pixels)

        elif isinstance(key[0], slice):
            # Given some slice iterate over the bands and get the bands and pixel space requested
            arrs = []
            for band in list(list(range(1, self.nbands + 1))[key[0]]):
                arrs.append(self.read_array(band, pixels = pixels))
            return np.stack(arrs)

        else:
            arrs = []
            for b in key[0]:
                arrs.append(self.read_array(band=int(b+1), pixels=pixels))
        return np.stack(arrs)
Example #29
def main(args):
    # load the model
    model = load_model(args.model_filename, custom_objects={
        'SubPixelUpscaling': SubPixelUpscaling
    })
    print(model.layers)
    # load the images and bucket them by shape
    images_by_size = defaultdict(list)
    for filename in glob.glob(args.image_glob):
        img = Image.open(filename)
        img = img.resize(tuple(int(x * args.output_scale) for x in img.size))  # scale up
        images_by_size[img.size].append(img)
    # apply the model to the images
    for size, imgs in images_by_size.items():
        images = [img_to_array(img) for img in imgs]
        images = (np.stack(images) / 127.5) - 1.
        # NOTE: :(
        x = input_layer = Input(shape=images.shape[1:])
        for layer in model.layers[1:]:
            x = layer(x)
        this_model = Model([input_layer], [x])
        this_model.compile(optimizer='sgd', loss='mse')
        # END :(
        new_images = images
        for _ in range(args.apply_n):
            new_images = this_model.predict(new_images, verbose=False)
        # save before/after images
        for i in range(new_images.shape[0]):
            new_image = new_images[i]
            image = images[i]
            samples = np.stack([image, new_image])
            filename = '{}_{}.png'.format(size, i)
            filename = os.path.join(args.output_path, filename)
            print('saving sample', samples.shape, filename)
            save_sample_grid(samples, filename)
def extract_features(ids, path, output_path, extractor, batch_size=64):
    images_names = dict()
    for p in listdir(path):
        image_id = int(p.split('_')[-1].split('.')[0])
        if image_id in ids:
            images_names[image_id] = p
    batch,names = [],[]
    with open(output_path,'w') as output_file:
        for idx,n in enumerate(images_names):
            p = join(path, images_names[n])
            batch.append(load_image(p))
            names.append(n)
            if len(batch)==batch_size:
                batch = np.stack(batch)
                feed_dict = {images: batch}
                with tf.device('/gpu:0'):
                    features = sess.run(extractor, feed_dict=feed_dict)
                for n,f in zip(names,features):
                    output_file.write("%s;%s\n" % (n, " ".join(str(x) for x in f)))
                print("%d/%d" % (idx,len(images_names)))
                batch, names = [],[]
                output_file.flush()
        if len(batch)>0:
            batch = np.stack(batch)
            feed_dict = {images: batch}
            with tf.device('/gpu:0'):
                features = sess.run(extractor, feed_dict=feed_dict)
            for n,f in zip(names,features):
                output_file.write("%s;%s\n" % (n, " ".join(str(x) for x in f)))
            print("%d/%d" % (idx,len(images_names)))
            output_file.flush()
Example #31
    # Given the weight of this classifier, cast votes to the corresponding class
    pred = tree.get_predictions(X_train, y_train, X_test)

    for i, y in enumerate(pred):
        if y == 1:
            current = votes_1[i]
            votes_1[i] = current + tree_weight

        else:
            current = votes_2[i]
            votes_2[i] = current + tree_weight
    print("Tree no. {}: \n\t1: {} \n\t2: {}".format(j, votes_1, votes_2))
    # print("Got votes from classifier {}.".format(j))

votes = np.stack((votes_1, votes_2), axis=1)/VOTE_SCALING_FACTOR
y_pred = list(np.argmax(votes, axis=1) + 1)


# y_pred = get_predictions(X_train, y_train, X_test)

# Arrange answer in two columns. First column (with header "Id") is an
# enumeration from 0 to n-1, where n is the number of test points. Second
# column (with header "EpiOrStroma") is the predictions.

test_header = "Id,EpiOrStroma"
n_points = X_test.shape[0]
y_pred_pp = np.ones((n_points, 2))
y_pred_pp[:, 0] = range(n_points)
y_pred_pp[:, 1] = y_pred
file_name = 'submissions/my_submission_ADA_{}_a.csv'.format(NO_OF_CLASSIFIERS)
def extract_2d_blocks_training(inputul, outputul, iteration, block_size_input,
                               block_size_output, dim_output):

    ## inputul -- shape (num_batch, width, height, num_imaging_modalities)
    ## outputul -- shape (num_batch, width, height, num_imaging_modalities)

    #### this extracts up to 4 training examples per scan (2 random + 2 tumor-centered) from 2 scans ####

    lista = np.arange(inputul.shape[0])
    np.random.seed(iteration)
    np.random.shuffle(lista)
    current_index = lista[:2]
    semi_block_size_input = int(block_size_input // 2)
    semi_block_size_input2 = block_size_input - semi_block_size_input
    semi_block_size_output = int(block_size_output // 2)
    semi_block_size_output2 = block_size_output - semi_block_size_output
    list_blocks_input = []
    list_blocks_segmentation = []

    for _ in current_index:

        ##### iterating over 2D images #####
        ### pad current input and output scan to avoid problems ####

        current_input = inputul[_, ...]
        current_output = outputul[_, ...]

        #### shape of current scan ####
        current_shape = inputul[_, ...].shape

        #################################################################################################################
        #### random places being extracted -- most likely not containing any segmentation besides background class ######
        #################################################################################################################

        list_of_random_places1 = random.sample(
            range(semi_block_size_output,
                  current_shape[0] - semi_block_size_output2), 2)
        list_of_random_places2 = random.sample(
            range(semi_block_size_output,
                  current_shape[1] - semi_block_size_output2), 2)

        for __ in range(2):

            #### iterate over the 2 locations of the 3D cubes #####
            central_points = [
                list_of_random_places1[__], list_of_random_places2[__]
            ]

            current_input_padded, current_output_padded, central_points = check_and_add_zero_padding_2d_image(
                current_input, current_output, central_points,
                semi_block_size_input, semi_block_size_input2)

            list_blocks_segmentation.append(
                crop_2D_block(current_output_padded, central_points,
                              semi_block_size_output, semi_block_size_output2))
            list_blocks_input.append(
                crop_2D_block(current_input_padded, central_points,
                              semi_block_size_input, semi_block_size_input2))

        ###############################################################################################
        ##### specifically extract 2D patches with a non-background class #############################
        ###############################################################################################

        #########################
        ##### Class number 1 ####
        #########################

        indices_tumor = np.where(current_output[..., 0] == 1.0)
        indices_tumor_dim1 = indices_tumor[0]
        indices_tumor_dim2 = indices_tumor[1]

        if len(indices_tumor_dim1) == 0:

            print('tumor not found')

        else:

            list_of_random_places = random.sample(
                range(0, len(indices_tumor_dim1)), 2)

            for __ in range(2):

                central_points = [
                    indices_tumor_dim1[list_of_random_places[__]],
                    indices_tumor_dim2[list_of_random_places[__]]
                ]

                current_input_padded, current_output_padded, central_points = check_and_add_zero_padding_2d_image(
                    current_input, current_output, central_points,
                    semi_block_size_input, semi_block_size_input2)

                list_blocks_segmentation.append(
                    crop_2D_block(current_output_padded, central_points,
                                  semi_block_size_output,
                                  semi_block_size_output2))
                list_blocks_input.append(
                    crop_2D_block(current_input_padded, central_points,
                                  semi_block_size_input,
                                  semi_block_size_input2))

    list_blocks_input = np.stack(list_blocks_input)
    list_blocks_segmentation = np.stack(list_blocks_segmentation)

    shape_of_seg = list_blocks_segmentation.shape
    list_blocks_segmentation = list_blocks_segmentation.reshape((-1, 1))
    #list_blocks_segmentation = output_transformation(list_blocks_segmentation)
    #enc = preprocessing.OneHotEncoder()
    #enc.fit(list_blocks_segmentation)
    #list_blocks_segmentation = enc.transform(list_blocks_segmentation).toarray()
    #list_blocks_segmentation = list_blocks_segmentation.reshape((-1,1))
    list_blocks_segmentation = OneHotEncoder(list_blocks_segmentation)
    list_blocks_segmentation = list_blocks_segmentation.reshape(
        (shape_of_seg[0], shape_of_seg[1], shape_of_seg[2], dim_output))

    return list_blocks_input, list_blocks_segmentation
def xyz2uv(xyz):
    c = np.sqrt((xyz[..., :2]**2).sum(-1))
    u = np.arctan2(xyz[..., 1], xyz[..., 0])
    v = np.arctan2(xyz[..., 2], c)
    return np.stack([u, v], axis=-1)
def uv2xyz(uv):
    sin_u = np.sin(uv[..., 0])
    cos_u = np.cos(uv[..., 0])
    sin_v = np.sin(uv[..., 1])
    cos_v = np.cos(uv[..., 1])
    return np.stack([cos_v * cos_u, cos_v * sin_u, sin_v], axis=-1)
def genuv(h, w):
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    u = (u + 0.5) * 2 * np.pi / w - np.pi
    v = (v + 0.5) * np.pi / h - np.pi / 2
    return np.stack([u, v], axis=-1)
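The three helpers compose into an easy round-trip check: grid (u, v) angles map to unit vectors on the sphere and back without loss.

import numpy as np

uv = genuv(4, 8)        # (4, 8, 2) grid of (u, v) angles
xyz = uv2xyz(uv)        # unit vectors on the sphere
assert np.allclose(xyz2uv(xyz), uv)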
def main(_):
  assert FLAGS.model_dirs_or_checkpoints

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  if (FLAGS.operation == "average_last_n" and
      len(FLAGS.model_dirs_or_checkpoints) > 1):
    raise ValueError("Need only 1 directory for %s operation" % FLAGS.operation)

  checkpoints = []

  for path in FLAGS.model_dirs_or_checkpoints:
    if tf.gfile.IsDirectory(path):
      # Grab the latest checkpoint for all the provided model dirs
      checkpoint_state = tf.train.get_checkpoint_state(path)
      if FLAGS.operation == "average_last_n":
        ckpt_paths = tf.io.gfile.glob(os.path.join(path, "model.ckpt*index"))
        def sort_fn(ckpt):
          return int(re.sub(".*ckpt-", "", ckpt))

        ckpts = sorted([c.replace(".index", "") for c in ckpt_paths],
                       key=sort_fn)
        checkpoints.extend(ckpts[-FLAGS.number_of_checkpoints:])
      else:
        checkpoints.append(checkpoint_state.all_model_checkpoint_paths[-1])
    else:
      if FLAGS.operation == "average_last_n":
        raise ValueError("need a directory while running %s operation" %
                         FLAGS.operation)
      checkpoints.append(path)

  logging.info("Using checkpoints %s", checkpoints)

  if FLAGS.operation in ["ensemble", "average", "average_last_n"]:
    if len(checkpoints) == 1:
      raise ValueError("no point in ensebling/averaging one checkpoint")
  else:
    if len(checkpoints) != 1:
      raise ValueError(
          "operation %s requires exactly one checkpoint" % FLAGS.operation)

  var_values = {}
  var_dtypes = {}

  for i in range(0, len(checkpoints)):
    checkpoint = checkpoints[i]
    logging.info("loading checkpoint %s", checkpoint)
    reader = tf.train.load_checkpoint(checkpoint)
    var_list = tf.train.list_variables(checkpoint)
    for (name, _) in var_list:
      if i:
        assert name in var_values
        tensor = reader.get_tensor(name)
        assert tensor.dtype == var_dtypes[name]
        var_values[name].append(tensor)
      else:
        tensor = reader.get_tensor(name)
        var_dtypes[name] = tensor.dtype
        var_values[name] = [tensor]
        if not FLAGS.global_step:
          if name == "global_step":
            FLAGS.global_step = tensor

    logging.info("Read from checkpoint %s", checkpoint)

  # stack the list of tensors along the 0th dimension.
  for name, tensors in var_values.items():
    tensor = tensors[0]
    if name == "global_step":
      new_val = np.int32(FLAGS.global_step)
    elif FLAGS.operation == "ensemble":
      new_val = np.stack(tensors)
    elif FLAGS.operation == "autoensemble":
      new_val = np.stack([tensor] * FLAGS.autoensemble_size)
    elif FLAGS.operation == "average" or FLAGS.operation == "average_last_n":
      new_val = average_tensors(tensors)
    elif FLAGS.operation == "extract_first":
      new_val = tensor[0]
    else:
      raise ValueError("unknown FLAGS.operation=%s" % FLAGS.operation)
    var_values[name] = new_val

  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    tf_vars = [
        tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
        for v in var_values
    ]

  placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
  assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
  saver = tf.train.Saver(tf.all_variables())

  output_file = "model.ckpt-" + str(FLAGS.global_step)
  output_path = os.path.join(FLAGS.output_dir, output_file)

  # Build a model consisting only of variables, set them to the average values.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for p, assign_op, (name, value) in zip(placeholders, assign_ops,
                                           six.iteritems(var_values)):
      sess.run(assign_op, {p: value})
    # Use the built saver to save the averaged checkpoint.
    saver.save(sess, output_path)

  logging.info("Transformed checkpoints saved in %s", output_path)
Example #37
def main():
    if args.dataset=='robotic_instrument':
        from datasets.robotic_instrument import get_testloader, RoboticInstrument
        if args.task=='binary':
            num_classes = 2
        elif args.task=='parts':
            num_classes = 5
        elif args.task=='type':
            num_classes = 8
        dataset = RoboticInstrument(args.task, 'test')
        test_loader = get_testloader(args.task, batch_size=args.batch_size)
        net_param = {"class_num"   : num_classes,
                     "in_chns"     : 3,
                     "bilinear"    : True,
                     "feature_chns": [16, 32, 64, 128, 256],
                     "dropout"     : [0.0, 0.0, 0.3, 0.4, 0.5]}
    elif args.dataset=='covid19_lesion':
        from datasets.covid19_lesion import get_testloader, Covid19Dataset
        dataset = Covid19Dataset(args.task, 'test')
        test_loader = get_testloader(args.task, batch_size=args.batch_size)
        num_classes = 2
        net_param = {"class_num"   : num_classes,
                     "in_chns"     : 1,
                     "bilinear"    : True,
                     "feature_chns": [16, 32, 64, 128, 256],
                     "dropout"     : [0.0, 0.0, 0.3, 0.4, 0.5]}
    else:
        raise NotImplementedError('The dataset is not supported.')
    
    net = COPLENet(net_param).cuda()
    optimizer.load_weights(net, None, None, args.snapshot, False)    
    torch.cuda.empty_cache()

    net.eval()
    hist = 0
    predictions = []
    groundtruths = []
    for test_idx, data in enumerate(test_loader):
        inputs, gts = data 
        assert len(inputs.size()) == 4 and len(gts.size()) == 3
        assert inputs.size()[2:] == gts.size()[1:]
        inputs, gts = inputs.cuda(), gts.cuda()
        with torch.no_grad():
            output = net(inputs)
        del inputs
        assert output.size()[2:] == gts.size()[1:]
        assert output.size()[1] == num_classes
        
        prediction = output.data.max(1)[1].cpu()
        predictions.append(output.data.cpu().numpy())
        groundtruths.append(gts.cpu().numpy())
        hist += fast_hist(prediction.numpy().flatten(), gts.cpu().numpy().flatten(),
                             num_classes)
        del gts, output, test_idx, data
    
    predictions = np.concatenate(predictions, axis=0)
    groundtruths = np.concatenate(groundtruths, axis=0)
    if args.dump_imgs:
        assert len(dataset)==predictions.shape[0]
    
        dump_dir = './dump_' + args.dataset + '_' + args.task + '_' + args.method
        os.makedirs(dump_dir, exist_ok=True)
        for i in range(len(dataset)):
            img = skimage.io.imread(dataset.img_paths[i])
            if len(img.shape)==2:
                img = np.stack((img, img, img), axis=2)
            img = skimage.transform.resize(img, (224,336))
            cm = np.argmax(predictions[i,:,:,:], axis=0)
            color_cm = add_color(cm)
            color_cm = skimage.transform.resize(color_cm, (224,336))
            gt = np.asarray(groundtruths[i,:,:], np.uint8)
            color_gt = add_color(gt)
            color_gt = skimage.transform.resize(color_gt, (224,336))
            blend_pred = 0.5 * img + 0.5 * color_cm
            blend_gt = 0.5 * img + 0.5 * color_gt
            blend_pred = np.asarray(blend_pred*255, np.uint8)
            blend_gt = np.asarray(blend_gt*255, np.uint8)
            #skimage.io.imsave(os.path.join(dump_dir, 'img_{:03d}.png'.format(i)), img)
            skimage.io.imsave(os.path.join(dump_dir, 'pred_{:03d}.png'.format(i)), blend_pred)
            skimage.io.imsave(os.path.join(dump_dir, 'gt_{:03d}.png'.format(i)), blend_gt)
            if i > 20:
                break
    
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))    
    id2cat = {i: i for i in range(len(iou))}
    iou_false_positive = hist.sum(axis=1) - np.diag(hist)
    iou_false_negative = hist.sum(axis=0) - np.diag(hist)
    iou_true_positive = np.diag(hist)

    print('IoU:')
    print('label_id      label    IoU    Precision Recall TP       FP      FN      Pixel Acc.')
    for idx, i in enumerate(iou):
        idx_string = "{:2d}".format(idx)
        class_name = "{:>13}".format(id2cat[idx]) if idx in id2cat else ''
        iou_string = '{:5.1f}'.format(i * 100)
        total_pixels = hist.sum()
        tp = '{:5.1f}'.format(100 * iou_true_positive[idx] / total_pixels)
        fp = '{:5.1f}'.format(100 * iou_false_positive[idx] / total_pixels)
        fn = '{:5.1f}'.format(100 * iou_false_negative[idx] / total_pixels)
        precision = '{:5.1f}'.format(
            iou_true_positive[idx] / (iou_true_positive[idx] + iou_false_positive[idx]))
        recall = '{:5.1f}'.format(
            iou_true_positive[idx] / (iou_true_positive[idx] + iou_false_negative[idx]))
        pixel_acc = '{:5.1f}'.format(100*acc_cls[idx])
        print('{}    {}   {}  {}     {}  {}   {}   {}   {}'.format(
            idx_string, class_name, iou_string, precision, recall, tp, fp, fn, pixel_acc))
def main():
    
    np.set_printoptions(formatter={'float_kind':lambda x: "%.2f" % x})


    env = envstandalone.BlockArrange()

    # Standard q-learning parameters
    max_timesteps=800
    exploration_fraction=0.3
    exploration_final_eps=0.1
    gamma=.90
    num_cpu = 16

    # Used by buffering and DQN
    learning_starts=100
    buffer_size=1000
    batch_size=10
    target_network_update_freq=1
    train_freq=1
    print_freq=1
    lr=0.0003

    # first two elts of deicticShape must be odd
    actionShape = (3,3,3)
    memoryShape = (3,3,3)
    stateActionShape = (3,3,6) # includes place memory
    num_states = 2 # either holding or not
    num_patches = env.maxSide**2
    num_actions_discrete = 3 # pick/place/look
    num_actions = num_actions_discrete*num_patches
    num_cascade = 3
#    valueFunctionType = "TABULAR"
    valueFunctionType = "DQN"
#    actionSelectionStrategy = "UNIFORM_RANDOM" # actions are selected randomly from collection of all actions
    actionSelectionStrategy = "RANDOM_UNIQUE" # each unique action descriptor has equal chance of being selected

    DEBUG = False
#    DEBUG = True

    episode_rewards = [0.0]
    
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

#    prioritized_replay=True
    prioritized_replay=False
#    prioritized_replay_alpha=1.0
    prioritized_replay_alpha=0.6
    prioritized_replay_beta0=0.4
    prioritized_replay_beta_iters=None
#    prioritized_replay_beta_iters=20000
    prioritized_replay_eps=1e-6
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    beta = 1

    q_func = models.cnn_to_mlp(
#    q_func = models.cnn_to_mlp_2pathways(
#        convs=[(16,3,1), (32,3,1)],
#        hiddens=[48],
        convs=[(32,3,1)],
        hiddens=[48],
#        convs=[(48,3,1)],
#        hiddens=[48],
        dueling=True
    )

    def make_obs_ph(name):
        return U.BatchInput(env.observation_space.spaces[0].shape, name=name)

    def make_deic_ph(name):
        return U.BatchInput(stateActionShape, name=name)

    def make_target_ph(name):
        return U.BatchInput([1], name=name)
#        return U.BatchInput([num_cascade], name=name)

    def make_weight_ph(name):
        return U.BatchInput([1], name=name)
#        return U.BatchInput([num_cascade], name=name)

    getMoveActionDescriptors = build_getMoveActionDescriptors(make_obs_ph=make_obs_ph,deicticShape=actionShape)
    
    if valueFunctionType == 'DQN':
        getqNotHolding1 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq",qscope="q_func_notholding")
        getqHolding1 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq",qscope="q_func_holding")
        targetTrainNotHolding1 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq", qscope="q_func_notholding",grad_norm_clipping=1.)
        targetTrainHolding1 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq",qscope="q_func_holding",grad_norm_clipping=1.)
        
#        getqNotHolding2 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq2",qscope="q_func_notholding2")
#        getqHolding2 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq2",qscope="q_func_holding2")
#        targetTrainNotHolding2 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq2", qscope="q_func_notholding2",grad_norm_clipping=1.)
#        targetTrainHolding2 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq2",qscope="q_func_holding2",grad_norm_clipping=1.)
#        
#        getqNotHolding3 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq3",qscope="q_func_notholding3")
#        getqHolding3 = build_getq(make_deic_ph=make_deic_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,scope="deepq3",qscope="q_func_holding3")
#        targetTrainNotHolding3 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq3", qscope="q_func_notholding3",grad_norm_clipping=1.)
#        targetTrainHolding3 = build_targetTrain(make_deic_ph=make_deic_ph,make_target_ph=make_target_ph,make_weight_ph=make_weight_ph,q_func=q_func,num_states=num_states,num_cascade=num_cascade,optimizer=tf.train.AdamOptimizer(learning_rate=lr),scope="deepq3",qscope="q_func_holding3",grad_norm_clipping=1.)
        
    sess = U.make_session(num_cpu)
    sess.__enter__()

    obs = copy.deepcopy(env.reset())
    grid_t = obs[0]
#    grid_t = np.int32(obs[0]>0)
    stateHolding_t = np.int32(obs[1] > 0)
    memory_t = np.zeros([1, memoryShape[0], memoryShape[1], memoryShape[2]]) # channel 0 is pick, channel 1 is place, channel 2 is look
#    memory_t[0,:,:,2] = (env.pickBlockGoal + 2) * np.ones([memoryShape[1], memoryShape[2]]) # DEBUG
    
    episode_rewards = [0.0]
    timerStart = time.time()
    U.initialize()

    if DEBUG:
        saver = tf.train.Saver()
        saver.restore(sess, "./temp")

    for t in range(max_timesteps):

        # Get state/action descriptors
        moveDescriptors = getMoveActionDescriptors([grid_t])
        moveDescriptors[moveDescriptors == 0] = -1
        actionsPickDescriptors = np.stack([moveDescriptors, np.zeros(np.shape(moveDescriptors)), np.zeros(np.shape(moveDescriptors))],axis=3)
        actionsPlaceDescriptors = np.stack([np.zeros(np.shape(moveDescriptors)),moveDescriptors, np.zeros(np.shape(moveDescriptors))],axis=3)
        actionsLookDescriptors = np.stack([np.zeros(np.shape(moveDescriptors)), np.zeros(np.shape(moveDescriptors)), moveDescriptors],axis=3)
        actionDescriptors = np.r_[actionsPickDescriptors,actionsPlaceDescriptors,actionsLookDescriptors]
        memoryTiled = np.repeat(memory_t,num_patches*num_actions_discrete,axis=0)
        stateActionDescriptors = np.concatenate([actionDescriptors, memoryTiled],axis=3)

        # Get current values
        qCurrNotHolding = getqNotHolding1(stateActionDescriptors)
        qCurrHolding = getqHolding1(stateActionDescriptors)
#        qCurrNotHolding = getqNotHolding3(stateActionDescriptors)
#        qCurrHolding = getqHolding3(stateActionDescriptors)
        qCurr = np.concatenate([qCurrNotHolding,qCurrHolding],axis=1)

        # Select action
        qCurrNoise = qCurr + np.random.random(np.shape(qCurr))*0.01 # add small amount of noise to break ties randomly
        if actionSelectionStrategy == "UNIFORM_RANDOM":
            action = np.argmax(qCurrNoise[:,stateHolding_t])
            if np.random.rand() < exploration.value(t):
                action = np.random.randint(num_actions)
        elif actionSelectionStrategy == "RANDOM_UNIQUE":
            _,idx,inv = np.unique(actionDescriptors,axis=0,return_index=True,return_inverse=True)
            actionIdx = np.argmax(qCurrNoise[idx,stateHolding_t])
            
            # epsilon-greedy exploration over unique action descriptors
            if np.random.rand() < exploration.value(t):
                actionIdx = np.random.randint(len(idx))

            actionsSelected = np.nonzero(inv==actionIdx)[0]
            action = actionsSelected[np.random.randint(len(actionsSelected))]
        else:
            print("Error...")

        # Take action
        new_obs, rew, done, _ = env.step(action)
        
        # Update state and memory
        grid_tp1 = new_obs[0]
#        grid_tp1 = np.int32(new_obs[0]>0)
        stateHolding_tp1= np.int32(new_obs[1] > 0)
        memory_tp1 = np.copy(memory_t)
        if (stateHolding_t == 0) and (stateHolding_tp1 != 0): # if a block has been picked
            memory_tp1[:,:,:,0] = np.reshape(stateActionDescriptors[action][:,:,0],[1,stateActionShape[0],stateActionShape[1]])
        if (stateHolding_t > 0) and (stateHolding_tp1 == 0): # if a block has just been placed
            memory_tp1[:,:,:,1] = np.reshape(stateActionDescriptors[action][:,:,1],[1,stateActionShape[0],stateActionShape[1]])
        if action >= num_patches*2: # if this is a look action (look actions occupy the last num_patches slots)
#            memory_tp1[:,:,:,2] = np.reshape(stateActionDescriptors[action][:,:,2],[1,stateActionShape[0],stateActionShape[1]])
#            memory_tp1[0,:,:,2] = (env.pickBlockGoal + 2) * np.ones([memoryShape[1], memoryShape[2]]) # DEBUG
            if (env.pickBlockGoal + 2) in stateActionDescriptors[action][:,:,2]:
                memory_tp1[0,:,:,2] = (env.pickBlockGoal + 2) * np.ones([memoryShape[1], memoryShape[2]])

        if DEBUG:
            env.render()
            print("memory: ")
            print(str(memory_tp1))
            print("action: " + str(action))
            print("action descriptor:")
            if action < num_patches:
                print(stateActionDescriptors[action][:,:,0])
            elif action < 2*num_patches:
                print(stateActionDescriptors[action][:,:,1])
            else:
                print(stateActionDescriptors[action][:,:,2])

#        memory_tp1[0,:,:,2] = (env.pickBlockGoal + 2) * np.ones([memoryShape[1], memoryShape[2]]) # DEBUG

        # Add to replay buffer
        replay_buffer.add(stateHolding_t, stateActionDescriptors[action,:], rew, stateHolding_tp1, grid_tp1, memory_tp1[0], done)
        
        # handle end of episode
        if done:
            new_obs = env.reset()
            grid_tp1 = new_obs[0]
            stateHolding_tp1= np.int32(new_obs[1] > 0)
            memory_tp1 = np.zeros([1, memoryShape[0], memoryShape[1], memoryShape[2]])
#            memory_tp1[0,:,:,2] = (env.pickBlockGoal + 2) * np.ones([memoryShape[1], memoryShape[2]]) # DEBUG

        # Set tp1 equal to t
        stateHolding_t = stateHolding_tp1
        grid_t = grid_tp1
        memory_t = memory_tp1
        
        
        if t > learning_starts and t % train_freq == 0:

            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            if prioritized_replay:
                beta=beta_schedule.value(t)
                states_t, actionPatches, rewards, images_tp1, states_tp1, placeMemory_tp1, dones, weights, batch_idxes = replay_buffer.sample(batch_size, beta)
            else:
                statesDiscrete_t, stateActionsImage_t, rewards, statesDiscrete_tp1, grids_tp1, memories_tp1, dones = replay_buffer.sample(batch_size)
                weights, batch_idxes = np.ones_like(rewards), None

            moveDescriptorsNext = getMoveActionDescriptors(grids_tp1)
            moveDescriptorsNext[moveDescriptorsNext == 0] = -1
            
            actionsPickDescriptorsNext = np.stack([moveDescriptorsNext, np.zeros(np.shape(moveDescriptorsNext)), np.zeros(np.shape(moveDescriptorsNext))],axis=3)
            actionsPlaceDescriptorsNext = np.stack([np.zeros(np.shape(moveDescriptorsNext)), moveDescriptorsNext, np.zeros(np.shape(moveDescriptorsNext))],axis=3)
            actionsLookDescriptorsNext = np.stack([np.zeros(np.shape(moveDescriptorsNext)), np.zeros(np.shape(moveDescriptorsNext)), moveDescriptorsNext],axis=3)
            actionDescriptorsNext = np.stack([actionsPickDescriptorsNext, actionsPlaceDescriptorsNext, actionsLookDescriptorsNext], axis=1) # I sometimes get this axis parameter wrong... pay attention!
            actionDescriptorsNext = np.reshape(actionDescriptorsNext,[batch_size*num_patches*num_actions_discrete,actionShape[0],actionShape[1],actionShape[2]])

            # Augment with state, i.e. place memory
            placeMemory_tp1_expanded = np.repeat(memories_tp1,num_patches*num_actions_discrete,axis=0)
            actionDescriptorsNext = np.concatenate([actionDescriptorsNext, placeMemory_tp1_expanded],axis=3)
            
            qNextNotHolding = getqNotHolding1(actionDescriptorsNext)
            qNextHolding = getqHolding1(actionDescriptorsNext)
            qNextFlat = np.concatenate([qNextNotHolding,qNextHolding],axis=1)

            qNext = np.reshape(qNextFlat,[batch_size,num_patches,num_actions_discrete,num_states])
            qNextmax = np.max(np.max(qNext[range(batch_size),:,:,statesDiscrete_tp1],2),1)
            targets = rewards + (1-dones) * gamma * qNextmax
            

            # avg value
            qCurrTargetNotHolding = getqNotHolding1(stateActionsImage_t)
            qCurrTargetHolding = getqHolding1(stateActionsImage_t)
            qCurrTarget = np.concatenate([qCurrTargetNotHolding,qCurrTargetHolding],axis=1)
            td_error = qCurrTarget[range(batch_size),statesDiscrete_t] - targets # saved for the prioritized-replay priorities below
            qCurrTarget[range(batch_size),statesDiscrete_t] = targets
            targetTrainNotHolding1(stateActionsImage_t, np.reshape(qCurrTarget[:,0],[batch_size,1]), np.reshape(weights,[batch_size,1]))
            targetTrainHolding1(stateActionsImage_t, np.reshape(qCurrTarget[:,1],[batch_size,1]), np.reshape(weights,[batch_size,1]))

#            # cascaded value
#            qCurrTargetNotHolding1 = getqNotHolding1(stateActionsImage_t)
#            qCurrTargetHolding1 = getqHolding1(stateActionsImage_t)
#            qCurrTarget1 = np.concatenate([qCurrTargetNotHolding1,qCurrTargetHolding1],axis=1)
#            qCurrTargetNotHolding2 = getqNotHolding2(stateActionsImage_t)
#            qCurrTargetHolding2 = getqHolding2(stateActionsImage_t)
#            qCurrTarget2 = np.concatenate([qCurrTargetNotHolding2,qCurrTargetHolding2],axis=1)
#            qCurrTargetNotHolding3 = getqNotHolding3(stateActionsImage_t)
#            qCurrTargetHolding3 = getqHolding3(stateActionsImage_t)
#            qCurrTarget3 = np.concatenate([qCurrTargetNotHolding3,qCurrTargetHolding3],axis=1)
#            
#            mask2Idx = np.nonzero(targets < qCurrTarget1[range(batch_size),statesDiscrete_t])[0]
#            mask3Idx = np.nonzero(targets < qCurrTarget2[range(batch_size),statesDiscrete_t])[0]
#            qCurrTarget1[range(batch_size),statesDiscrete_t] = targets
#            qCurrTarget2[mask2Idx,statesDiscrete_t[mask2Idx]] = targets[mask2Idx]
#            qCurrTarget3[mask3Idx,statesDiscrete_t[mask3Idx]] = targets[mask3Idx]
#            
#            targetTrainNotHolding1(stateActionsImage_t, np.reshape(qCurrTarget1[:,0],[batch_size,1]), np.ones([batch_size,1]))
#            targetTrainHolding1(stateActionsImage_t, np.reshape(qCurrTarget1[:,1],[batch_size,1]), np.ones([batch_size,1]))
#            targetTrainNotHolding2(stateActionsImage_t, np.reshape(qCurrTarget2[:,0],[batch_size,1]), np.ones([batch_size,1]))
#            targetTrainHolding2(stateActionsImage_t, np.reshape(qCurrTarget2[:,1],[batch_size,1]), np.ones([batch_size,1]))
#            targetTrainNotHolding3(stateActionsImage_t, np.reshape(qCurrTarget3[:,0],[batch_size,1]), np.ones([batch_size,1]))
#            targetTrainHolding3(stateActionsImage_t, np.reshape(qCurrTarget3[:,1],[batch_size,1]), np.ones([batch_size,1]))

            if prioritized_replay:
                new_priorities = np.abs(td_error) + prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)



        # bookkeeping for storing episode rewards
        episode_rewards[-1] += rew
        if done:
#            new_obs = env.reset()
            episode_rewards.append(0.0)
        mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
        num_episodes = len(episode_rewards)
        if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            timerFinal = time.time()
            print("steps: " + str(t) + ", episodes: " + str(num_episodes) + ", mean 100 episode reward: " + str(mean_100ep_reward) + ", % time spent exploring: " + str(int(100 * exploration.value(t))) + ", time elapsed: " + str(timerFinal - timerStart))
            timerStart = timerFinal
        
        obs = new_obs

    saver = tf.train.Saver()
    saver.save(sess, "./temp")

    # display value function
    obs = env.reset()
    moveDescriptors = getMoveActionDescriptors([obs[0]])
    moveDescriptors[moveDescriptors == 0] = -1
    actionsPickDescriptorsOrig = np.stack([moveDescriptors, np.zeros(np.shape(moveDescriptors)), np.zeros(np.shape(moveDescriptors))],axis=3)
    actionsLookDescriptorsOrig = np.stack([np.zeros(np.shape(moveDescriptors)), np.zeros(np.shape(moveDescriptors)), moveDescriptors],axis=3)
    
    memoryZeros = np.zeros([1, memoryShape[0], memoryShape[1], memoryShape[2]])
    memoryLooked3 = np.zeros([1, memoryShape[0], memoryShape[1], memoryShape[2]])
    memoryLooked3[0,:,:,2] = 3*np.ones([stateActionShape[0], stateActionShape[1]])
    memoryLooked4 = np.zeros([1, memoryShape[0], memoryShape[1], memoryShape[2]])
    memoryLooked4[0,:,:,2] = 4*np.ones([stateActionShape[0], stateActionShape[1]])
    
    print("\nGrid configuration:")
    print(str(obs[0][:,:,0]))
        
    for i in range(3):
        
        if i == 0:
            placeMemory = memoryZeros
            print("\nMemory has zeros:")
        elif i==1:
            placeMemory = memoryLooked3
            print("\nMemory encodes look=3:")
        else:
            placeMemory = memoryLooked4
            print("\nMemory encodes look=4:")
            
        placeMemoryTiled = np.repeat(placeMemory,num_patches,axis=0)
        actionsPickDescriptors = np.concatenate([actionsPickDescriptorsOrig, placeMemoryTiled],axis=3)
        actionsLookDescriptors = np.concatenate([actionsLookDescriptorsOrig, placeMemoryTiled],axis=3)
    
        qPickNotHolding1 = getqNotHolding1(actionsPickDescriptors)
        qLookNotHolding1 = getqNotHolding1(actionsLookDescriptors)
#        qPickNotHolding2 = getqNotHolding2(actionsPickDescriptors)
#        qLookNotHolding2 = getqNotHolding2(actionsLookDescriptors)
#        qPickNotHolding3 = getqNotHolding3(actionsPickDescriptors)
#        qLookNotHolding3 = getqNotHolding3(actionsLookDescriptors)
        
        print("\nValue function for pick action in hold-nothing state:")
        print(str(np.reshape(qPickNotHolding1,[8,8])))
#        print("***")
#        print(str(np.reshape(qPickNotHolding2,[8,8])))
#        print("***")
#        print(str(np.reshape(qPickNotHolding3,[8,8])))
    
        print("\nValue function for look action in hold-nothing state:")
        print(str(np.reshape(qLookNotHolding1,[8,8])))
Beispiel #39
0
def running_normalize(vid_path,
                      save_folder='./norm_images/',
                      order=3,
                      dark=None,
                      return_images=False):
    #get first frame of background
    vidObj = cv2.VideoCapture(vid_path)

    success, img0 = vidObj.read()
    if not success:
        print('Video not found')
        return

    nframes = int(vidObj.get(cv2.CAP_PROP_FRAME_COUNT))
    print(nframes, 'frames')

    if dark is None:
        print('Computing dark count')
        #get dark count
        samplecount = 100  #how many frames to sample (at random)
        subtract = 5  #offset dark count
        min_cand = []
        positions = np.random.choice(
            nframes, samplecount, replace=False)  #get random frames to sample
        for i in range(samplecount):
            vidObj.set(cv2.CAP_PROP_POS_FRAMES, positions[i])
            success, image = vidObj.read()
            if success:
                min_cand.append(image.min())
            else:
                print('Something went wrong')
        dark = min(min_cand) - subtract
    print('dark count:{}'.format(dark))

    #make save folder if it doesn't exist
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    success, img0 = vidObj.read()
    if not success:
        print('Video not found')
        return
    img0 = img0[:, :, 0]

    img_return = []
    success = 1
    count = 0
    vidObj.set(cv2.CAP_PROP_POS_FRAMES, count)
    frame = vidObj.get(cv2.CAP_PROP_POS_FRAMES)

    #instantiate vmedian object
    v = vmedian(order=order, shape=img0.shape)
    v.add(img0)
    while success:
        success, image = vidObj.read()
        if success:
            image = image[:, :, 0]
            if not v.initialized:
                v.add(image)
                continue
            bg = v.get()
            numer = image - dark
            denom = np.clip((bg - dark), 1, 255)
            testimg = np.divide(numer, denom) * 100.
            testimg = np.clip(testimg, 0, 255)
            filename = os.path.dirname(save_folder) + '/image' + str(
                count).zfill(4) + '.png'
            cv2.imwrite(filename, testimg)
            testimg = np.stack((testimg, ) * 3, axis=-1)
            if return_images:
                img_return.append(testimg)
            print(filename, end='\r')
            v.add(image)
            count += 1
    return img_return
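# Hedged sketch (an assumption): a minimal stand-in for the external `vmedian`
# object used above -- a pixelwise median over a ring buffer of recent frames:
class SimpleMedianSketch:
    def __init__(self, depth, shape):
        self.depth = depth
        self.buf = np.zeros((depth,) + tuple(shape), dtype=np.float64)
        self.count = 0

    @property
    def initialized(self):
        return self.count >= self.depth

    def add(self, frame):
        self.buf[self.count % self.depth] = frame  # overwrite the oldest slot
        self.count += 1

    def get(self):
        return np.median(self.buf[:min(self.count, self.depth)], axis=0)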
        continue

    stacked_original = [
        np.concatenate((np.array(orig_flow), np.array(orig_result)), axis=-1)
        for orig_flow, orig_result in zip(orig_flows_by_attack_number_item,
                                          orig_results_by_attack_number_item)
    ]
    stacked_modified = [
        np.concatenate((np.array(modified_flow), np.array(modified_result)),
                       axis=-1) for modified_flow, modified_result in zip(
                           modified_flows_by_attack_number_item,
                           results_by_attack_number_item)
    ]

    seqs = [
        np.stack((orig, modified))
        for orig, modified in zip(stacked_original, stacked_modified)
    ]

    # Filter good seqs where the adversarial attack succeeded.
    filtered_seqs = [
        item for item in seqs
        if int(np.round(np.mean(numpy_sigmoid(item[0, -1:, -1])))) == 1
        and int(np.round(np.mean(numpy_sigmoid(item[1, -1:, -1])))) == 0
    ]

    print("Original seqs", len(seqs), "filtered seqs", len(filtered_seqs))
    seqs = filtered_seqs

    if len(filtered_seqs) <= 0:
        continue
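# Hedged sketch (an assumption): the `numpy_sigmoid` helper used above is
# presumably the standard logistic function:
def numpy_sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))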
Beispiel #41
0
            input_image_le_c = np.concatenate(
                [input_image_le, input_image_le_gamma], axis=2)

            output_image = cv2.cvtColor(
                cv2.imread(train_output_names_hdr[id], -1), cv2.COLOR_BGR2RGB)

            input_image_le_batch.append(
                np.expand_dims(input_image_le_c, axis=0))
            input_image_me_batch.append(
                np.expand_dims(input_image_me_c, axis=0))
            input_image_he_batch.append(
                np.expand_dims(input_image_he_c, axis=0))
            output_image_batch.append(np.expand_dims(output_image, axis=0))

        input_image_le_batch = np.squeeze(
            np.stack(input_image_le_batch, axis=1))
        input_image_me_batch = np.squeeze(
            np.stack(input_image_me_batch, axis=1))
        input_image_he_batch = np.squeeze(
            np.stack(input_image_he_batch, axis=1))
        output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))

        train_writer.add_summary(
            sess.run(le_image_summ,
                     feed_dict={le_image_pl: input_image_le_batch[..., :3]}),
            i)
        train_writer.add_summary(
            sess.run(me_image_summ,
                     feed_dict={me_image_pl: input_image_me_batch[..., :3]}),
            i)
        train_writer.add_summary(
            sess.run(he_image_summ,
                     feed_dict={he_image_pl: input_image_he_batch[..., :3]}),
            i)
    TracksAtECAL_dZSig = np.array(rhTree.ECAL_tracksDzSig_atECALfixIP).reshape(
        280, 360)
    TracksAtECAL_d0Sig = np.array(rhTree.ECAL_tracksD0Sig_atECALfixIP).reshape(
        280, 360)
    PixAtEcal_1 = np.array(rhTree.BPIX_layer1_ECAL_atPV).reshape(280, 360)
    PixAtEcal_2 = np.array(rhTree.BPIX_layer2_ECAL_atPV).reshape(280, 360)
    PixAtEcal_3 = np.array(rhTree.BPIX_layer3_ECAL_atPV).reshape(280, 360)
    PixAtEcal_4 = np.array(rhTree.BPIX_layer4_ECAL_atPV).reshape(280, 360)
    TibAtEcal_1 = np.array(rhTree.TIB_layer1_ECAL_atPV).reshape(280, 360)
    TibAtEcal_2 = np.array(rhTree.TIB_layer2_ECAL_atPV).reshape(280, 360)
    TobAtEcal_1 = np.array(rhTree.TOB_layer1_ECAL_atPV).reshape(280, 360)
    TobAtEcal_2 = np.array(rhTree.TOB_layer2_ECAL_atPV).reshape(280, 360)
    #X_CMSII            = np.stack([TracksAtECAL_pt, TracksAtECAL_dZSig, TracksAtECAL_d0Sig, ECAL_energy, HBHE_energy], axis=0) # (5, 280, 360)
    X_CMSII = np.stack([
        TracksAtECAL_pt, TracksAtECAL_dZSig, TracksAtECAL_d0Sig, ECAL_energy,
        HBHE_energy, PixAtEcal_1, PixAtEcal_2, PixAtEcal_3, PixAtEcal_4,
        TibAtEcal_1, TibAtEcal_2, TobAtEcal_1, TobAtEcal_2
    ],
                       axis=0)  # (13, 280, 360)
    #data['X_CMSII'] = np.stack([TracksAtECAL_pt, ECAL_energy, HBHE_energy], axis=0) # (3, 280, 360)
    #data['X_CMSII'] = np.stack([TracksAtECAL_pt, TracksAtECAL_dz, TracksAtECAL_d0, ECAL_energy], axis=0) # (4, 280, 360)
    #data['X_CMSII'] = np.stack([TracksAtECAL_pt, TracksAtECAL_dz, TracksAtECAL_d0, ECAL_energy, HBHE_energy, PixAtEcal_1, PixAtEcal_2, PixAtEcal_3, PixAtEcal_4], axis=0) # (9, 280, 360)

    # Jet attributes
    ys = rhTree.jetIsDiEle
    ams = rhTree.a_m
    apts = rhTree.a_pt
    #dRs    = rhTree.jetadR
    #pts    = rhTree.jetPt
    #m0s    = rhTree.jetM
    iphis = rhTree.jetSeed_iphi
    ietas = rhTree.jetSeed_ieta
def standard_test(input, layer, unit_index, preferred_stimulus):
    # Note: we put edge of square on centre of preferred-stimulus bar
    # Zhou et al. determined significance of the effects of contrast and border ownership with
    # a 3-factor ANOVA, significance .01. The factors were side-of-ownership, contrast polarity,
    # and time. Having no time component we use a two-factor ANOVA.
    # "In the standard test, sizes
    # of 4 or 6° were used for cells of V1 and V2, and sizes between 4 and 17°
    # were used for cells of V4, depending on response field size."
    # I don't see where they mention the number of reps per condition, but there are 10 reps
    # in Figure 4.

    colours = Colours()
    bg_colour_name = 'Light gray (background)'
    bg_colour = colours.get_RGB(bg_colour_name, 0)

    preferred_colour = preferred_stimulus['colour']

    square_shape = (100, 100)

    angle = preferred_stimulus['angle']
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle), np.cos(angle)]]
    position_1 = np.add(np.dot(rotation, np.array([-50, 0]).transpose()), [200, 200]).astype(int)
    position_2 = np.add(np.dot(rotation, np.array([50, 0]).transpose()), [200, 200]).astype(int)

    # preferred_shape = (preferred_stimulus['width'], preferred_stimulus['length'])
    # add_rectangle(stimulus, (200,200), preferred_shape, angle, preferred_colour)

    # Stimuli as in panels A-D of Zhou et al. Figure 2
    stimulus_A = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A, position_1, square_shape, angle, bg_colour)

    stimulus_B = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B, position_2, square_shape, angle, preferred_colour)

    stimulus_C = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C, position_1, square_shape, angle, preferred_colour)

    stimulus_D = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D, position_2, square_shape, angle, bg_colour)
    
    # Stimulus of different size
    square_shape = (150, 150)
    
    stimulus_A2 = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_A2, position_1, square_shape, angle, bg_colour)

    stimulus_B2 = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_B2, position_2, square_shape, angle, preferred_colour)

    stimulus_C2 = get_image((400, 400, 3), bg_colour)
    add_rectangle(stimulus_C2, position_1, square_shape, angle, preferred_colour)

    stimulus_D2 = get_image((400, 400, 3), preferred_colour)
    add_rectangle(stimulus_D2, position_2, square_shape, angle, bg_colour)

    input_data = np.stack((stimulus_A, stimulus_B, stimulus_C, stimulus_D,
                           stimulus_A2, stimulus_B2, stimulus_C2, stimulus_D2))

    # print(input_data.shape)
    # plt.imshow(stimulus_D)
    # plt.show()

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        model_tf = sess.graph.get_tensor_by_name(layer)
        activities = sess.run(
            model_tf, feed_dict={input: input_data})

        centre = (int(activities.shape[1] / 2), int(activities.shape[2] / 2))
        responses = activities[:, centre[0], centre[1], unit_index]

    m = np.mean(responses[:4])
    m2 = np.mean(responses[4:])

    A, B, C, D, A2, B2, C2, D2 = responses
    side = np.abs((A+C)/2 - (B+D)/2) / m * 100
    side2 = np.abs((A2+C2)/2 - (B2+D2)/2) / m2 * 100

    return {'responses': responses, 'side': side, 'side2': side2, 'mean': m, 'mean2': m2}
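# Hedged sketch (an assumption, not part of the original): the two-factor ANOVA
# described in the comments of standard_test could be run on the 8 responses
# with statsmodels, coding side-of-ownership ({A,C} vs {B,D}) and contrast
# polarity ({A,D} vs {B,C}) as the two factors:
def significance_sketch(responses):
    import pandas as pd
    import statsmodels.api as sm
    from statsmodels.formula.api import ols

    df = pd.DataFrame({
        'response': list(responses),            # A, B, C, D, A2, B2, C2, D2
        'side':     [0, 1, 0, 1, 0, 1, 0, 1],   # side of ownership
        'contrast': [0, 1, 1, 0, 0, 1, 1, 0],   # contrast polarity
    })
    model = ols('response ~ C(side) + C(contrast)', data=df).fit()
    return sm.stats.anova_lm(model, typ=2)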
import numpy as np
from matplotlib import pyplot as plt

from csxdata.stats import manova
from csxdata.visual import Plotter2D
from csxdata.utilities.highlevel import transform

from SciProjects.sophie import pull_data, axtitles

X_C, Y_C, DHI, D13C, CCode = pull_data("03GEO_pure.csv")

ingoes = np.stack((DHI, D13C), axis=1)

tX = transform(ingoes, 1, False, "lda", CCode)
F, p = manova(tX, CCode)

ttl = (r"2 $\sigma$ (kb. 95%), országokra illesztett konfidencia ellipszisek",
       "LDA + ANOVA: F = {:.2f}, p = {:.2f}, az eltérés {}szignifikáns".format(
           F, p, ("nem " if p > 0.05 else "")))
axlb = (axtitles["DHI"], axtitles["D13C"])

plot = Plotter2D(plt.figure(),
                 ingoes,
                 CCode,
                 title="\n".join(ttl),
                 axlabels=axlb)
plot.split_scatter(center=True, sigma=2, alpha=0.5)

plt.show()
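# Hedged equivalent (an assumption) of the csxdata `transform` call above,
# using scikit-learn's LDA to project the two features onto one discriminant axis:
#   from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#   tX = LinearDiscriminantAnalysis(n_components=1).fit_transform(ingoes, CCode)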
    def make_adversarial_examples(cls, image, true_label, target_label, args,
                                  attack_norm, model_to_fool):
        '''
        The attack process for generating adversarial examples with priors.
        '''
        # Initial setup
        orig_images = image.clone()
        prior_size = IMAGE_SIZE[
            args.dataset][0] if not args.tiling else args.tile_size
        assert args.tiling == (args.dataset == "ImageNet")
        if args.tiling:
            upsampler = Upsample(size=(IMAGE_SIZE[args.dataset][0],
                                       IMAGE_SIZE[args.dataset][1]))
        else:
            upsampler = lambda x: x
        total_queries = torch.zeros(args.batch_size).cuda()
        prior = torch.zeros(args.batch_size, IN_CHANNELS[args.dataset],
                            prior_size, prior_size).cuda()
        dim = prior.nelement(
        ) / args.batch_size  # nelement() --> total number of elements
        prior_step = BanditAttack.gd_prior_step if attack_norm == 'l2' else BanditAttack.eg_step
        image_step = BanditAttack.l2_image_step if attack_norm == 'l2' else BanditAttack.linf_step
        proj_maker = BanditAttack.l2_proj if attack_norm == 'l2' else BanditAttack.linf_proj  # proj_maker returns a projection function
        proj_step = proj_maker(orig_images, args.epsilon)
        # Loss function
        criterion = BanditAttack.cw_loss if args.loss == "cw" else BanditAttack.xent_loss
        # Original classifications
        orig_classes = model_to_fool(image).argmax(1).cuda()
        correct_classified_mask = (orig_classes == true_label).float()
        not_dones_mask = correct_classified_mask.clone()  # mask of correctly classified examples
        log.info("correct ratio : {:.3f}".format(
            correct_classified_mask.mean().item()))
        normalized_q1 = deque(maxlen=100)
        normalized_q2 = deque(maxlen=100)
        images = deque(maxlen=100)
        logits_q1_list = deque(maxlen=100)
        logits_q2_list = deque(maxlen=100)

        # Deliberately pick a sub-segment of the attack, e.g. a slice starting from the middle
        assert args.max_queries // 2 >= 100
        slice_iteration_end = random.randint(100, args.max_queries // 2)
        for i in range(slice_iteration_end):
            if not args.nes:
                ## Updating the prior:
                # Create noise for exporation, estimate the gradient, and take a PGD step
                exp_noise = args.exploration * torch.randn_like(prior) / (
                    dim**0.5
                )  # parameterizes the exploration to be done around the prior
                exp_noise = exp_noise.cuda()
                # Query deltas for finite difference estimator
                q1 = upsampler(
                    prior + exp_noise
                )  # finite-difference query: prior plays the role of v in the paper and accumulates the gradient
                q2 = upsampler(
                    prior - exp_noise
                )  # the prior is the accumulated update; steering the image with it makes the estimate much more accurate
                # Loss points for finite difference estimator
                logits_q1 = model_to_fool(image + args.fd_eta * q1 /
                                          BanditAttack.norm(q1))
                logits_q2 = model_to_fool(image + args.fd_eta * q2 /
                                          BanditAttack.norm(q2))
                l1 = criterion(logits_q1, true_label, target_label)
                l2 = criterion(logits_q2, true_label, target_label)
                if i >= slice_iteration_end - 100:
                    images.append(image.detach().cpu().numpy())
                    normalized_q1.append(
                        (args.fd_eta * q1 /
                         BanditAttack.norm(q1)).detach().cpu().numpy())
                    normalized_q2.append(
                        (args.fd_eta * q2 /
                         BanditAttack.norm(q2)).detach().cpu().numpy())
                    logits_q1_list.append(logits_q1.detach().cpu().numpy())
                    logits_q2_list.append(logits_q2.detach().cpu().numpy())

                # Finite differences estimate of directional derivative
                est_deriv = (l1 - l2) / (args.fd_eta * args.exploration)  # directional derivative; l1 and l2 are losses
                # 2-query gradient estimate
                est_grad = est_deriv.view(-1, 1, 1,
                                          1) * exp_noise  # B, C, H, W,
                # Update the prior with the estimated gradient
                prior = prior_step(
                    prior, est_grad,
                    args.online_lr)  # note: it is the prior that gets corrected -- the essence of the bandit algorithm
            else:  # NES method
                prior = torch.zeros_like(image).cuda()
                for grad_iter_t in range(args.gradient_iters):
                    exp_noise = torch.randn_like(image) / (dim**0.5)
                    logits_q1 = model_to_fool(image + args.fd_eta * exp_noise)
                    logits_q2 = model_to_fool(image - args.fd_eta * exp_noise)
                    l1 = criterion(logits_q1, true_label, target_label)
                    l2 = criterion(logits_q2, true_label, target_label)
                    est_deriv = (l1 - l2) / args.fd_eta
                    prior += est_deriv.view(-1, 1, 1, 1) * exp_noise
                    if i * args.gradient_iters + grad_iter_t >= slice_iteration_end - 100:
                        images.append(image.detach().cpu().numpy())
                        normalized_q1.append(
                            (args.fd_eta * exp_noise).detach().cpu().numpy())
                        normalized_q2.append(
                            (-args.fd_eta * exp_noise).detach().cpu().numpy())
                        logits_q1_list.append(logits_q1.detach().cpu().numpy())
                        logits_q2_list.append(logits_q2.detach().cpu().numpy())
                # Preserve images that are already done,
                # Unless we are specifically measuring gradient estimation
                prior = prior * not_dones_mask.view(-1, 1, 1, 1).cuda()

            ## Update the image:
            # take a pgd step using the prior
            new_im = image_step(
                image,
                upsampler(prior * correct_classified_mask.view(-1, 1, 1, 1)),
                args.image_lr)  # the upsampled prior is the accumulated update applied to the image
            image = proj_step(new_im)
            image = torch.clamp(image, 0, 1)

            ## Continue query count
            total_queries += 2 * args.gradient_iters * not_dones_mask  # gradient_iters is an int
            with torch.no_grad():
                adv_pred = model_to_fool(image).argmax(1)
            if args.targeted:
                not_dones_mask = not_dones_mask * (
                    1 - adv_pred.eq(target_label).float()
                ).float()  # not_dones starts as the correctly-classified mask, shape = (batch_size,)
            else:
                not_dones_mask = not_dones_mask * adv_pred.eq(
                    true_label).float()  # any example still predicted as its original label needs more queries (not yet successful)

            ## Logging stuff
            success_mask = (1 - not_dones_mask) * correct_classified_mask
            num_success = success_mask.sum()
            current_success_rate = (
                num_success.detach().cpu() /
                correct_classified_mask.detach().cpu().sum()).cpu().item()
            if num_success == 0:
                success_queries = 0
            else:
                success_queries = ((success_mask * total_queries).sum() /
                                   num_success).cpu().item()
            max_curr_queries = total_queries.max().cpu().item()
            # log.info("%d-th: Queries: %d | Success rate: %f | Average queries: %f" % (i, max_curr_queries, current_success_rate, success_queries))
            # if current_success_rate == 1.0:
            #     break

        normalized_q1 = np.ascontiguousarray(
            np.transpose(np.stack(list(normalized_q1)), axes=(1, 0, 2, 3, 4)))
        normalized_q2 = np.ascontiguousarray(
            np.transpose(np.stack(list(normalized_q2)), axes=(1, 0, 2, 3, 4)))
        images = np.ascontiguousarray(
            np.transpose(np.stack(list(images)), axes=(1, 0, 2, 3, 4)))
        logits_q1_list = np.ascontiguousarray(
            np.transpose(np.stack(list(logits_q1_list)),
                         axes=(1, 0, 2)))  # B,T,#class
        logits_q2_list = np.ascontiguousarray(
            np.transpose(np.stack(list(logits_q2_list)),
                         axes=(1, 0, 2)))  # B,T,#class

        return {
            'average_queries': success_queries,
            'num_correctly_classified':
            correct_classified_mask.sum().cpu().item(),
            'success_rate': current_success_rate,
            'images_orig': orig_images.cpu().numpy(),
            'images_adv': image.cpu().numpy(),
            'all_queries': total_queries.cpu().numpy(),
            'correctly_classified': correct_classified_mask.cpu().numpy(),
            'success': success_mask.cpu().numpy(),
            "q1": normalized_q1,
            "q2": normalized_q2,
            "images": images,
            "logits_q1": logits_q1_list,
            "logits_q2": logits_q2_list
        }
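# Hedged numpy summary (an assumption) of the two-query bandit gradient
# estimator used in the loop above:
def bandit_grad_estimate_sketch(loss_plus, loss_minus, exp_noise, fd_eta, exploration):
    # finite-difference directional derivative along exp_noise
    est_deriv = (loss_plus - loss_minus) / (fd_eta * exploration)
    return est_deriv.reshape(-1, 1, 1, 1) * exp_noise  # shape (B, C, H, W)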
Beispiel #46
0
    def _log_images(self, num_images=36):
        validation_X = self.validation_data[0]
        validation_y = self.validation_data[1]

        validation_length = len(validation_X)

        if validation_length > num_images:
            # pick some data at random
            indices = np.random.choice(validation_length,
                                       num_images,
                                       replace=False)
        else:
            indices = range(validation_length)

        test_data = []
        test_output = []
        for i in indices:
            test_example = validation_X[i]
            test_data.append(test_example)
            test_output.append(validation_y[i])

        predictions = self.model.predict(np.stack(test_data))

        if self.input_type == 'label':
            if self.output_type in ('image', 'images', 'segmentation_mask'):
                captions = self._logits_to_captions(test_data)
                output_image_data = self._masks_to_pixels(
                    predictions
                ) if self.output_type == 'segmentation_mask' else predictions
                reference_image_data = self._masks_to_pixels(
                    test_output
                ) if self.output_type == 'segmentation_mask' else test_output
                output_images = [
                    wandb.Image(data, caption=captions[i], grouping=2)
                    for i, data in enumerate(output_image_data)
                ]
                reference_images = [
                    wandb.Image(data, caption=captions[i])
                    for i, data in enumerate(reference_image_data)
                ]
                return list(
                    chain.from_iterable(zip(output_images, reference_images)))
        elif self.input_type in ('image', 'images', 'segmentation_mask'):
            input_image_data = self._masks_to_pixels(
                test_data
            ) if self.input_type == 'segmentation_mask' else test_data
            if self.output_type == 'label':
                # we just use the predicted label as the caption for now
                captions = self._logits_to_captions(predictions)
                return [
                    wandb.Image(data, caption=captions[i])
                    for i, data in enumerate(test_data)
                ]
            elif self.output_type in ('image', 'images', 'segmentation_mask'):
                output_image_data = self._masks_to_pixels(
                    predictions
                ) if self.output_type == 'segmentation_mask' else predictions
                reference_image_data = self._masks_to_pixels(
                    test_output
                ) if self.output_type == 'segmentation_mask' else test_output
                input_images = [
                    wandb.Image(data, grouping=3)
                    for i, data in enumerate(input_image_data)
                ]
                output_images = [
                    wandb.Image(data)
                    for i, data in enumerate(output_image_data)
                ]
                reference_images = [
                    wandb.Image(data)
                    for i, data in enumerate(reference_image_data)
                ]
                return list(
                    chain.from_iterable(
                        zip(input_images, output_images, reference_images)))
            else:
                # unknown output, just log the input images
                return [wandb.Image(img) for img in test_data]
        elif self.output_type in ('image', 'images', 'segmentation_mask'):
            # unknown input, just log the predicted and reference outputs without captions
            output_image_data = self._masks_to_pixels(
                predictions
            ) if self.output_type == 'segmentation_mask' else predictions
            reference_image_data = self._masks_to_pixels(
                test_output
            ) if self.output_type == 'segmentation_mask' else test_output
            output_images = [
                wandb.Image(data, grouping=2)
                for i, data in enumerate(output_image_data)
            ]
            reference_images = [
                wandb.Image(data)
                for i, data in enumerate(reference_image_data)
            ]
            return list(
                chain.from_iterable(zip(output_images, reference_images)))
                   'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
                   'hair drier', 'toothbrush']

word_embeddings = []

for c in classes:
    inputs = tokenizer(c, return_tensors="pt")
    outputs = model(**inputs)
    last_hidden_states = outputs[0]
    #import pdb
    #pdb.set_trace()
    word_embedding = last_hidden_states[0].detach().numpy()
    #print(word_embedding.size())
    word_embedding = np.mean(word_embedding, axis=0)
    print('{} {}'.format(c, word_embedding.shape))
    word_embeddings.append(word_embedding)

word_embeddings = np.stack(word_embeddings, axis=0)
print('{}'.format(word_embeddings.shape))
import pickle

with open('bert_coco.pkl', 'wb') as f:
    pickle.dump(word_embeddings, f)

print("{}",format(type(word_embeddings)))
#print(word_embeddings.shape)
with open('data/coco_glove_word2vec.pkl', 'rb') as f:
    d = pickle.load(f)

print("{} {}".format(d.shape, type(d)))
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # 2.2 Particle potential energy at γ
        #
        # particle_info = open(potential_path + '/Particle potential-' + str(frame) + '.dump', 'r')
        # alllines = particle_info.readlines()
        # lines = alllines[9:]
        # particle_info.close()
        # for i in range(len(lines)):
        #     if (lines[i] == '\n'): del lines[i]
        # Par_id        = np.array([int(line.strip().split(' ')[0]) for line in lines])   # whitespace-separated fields; column 1
        # Par_type      = np.array([int(line.strip().split(' ')[1]) for line in lines])   # column 2
        # Par_potential = np.array([float(line.strip().split(' ')[6]) for line in lines]) # column 7
        # frame_par_potential_t0 = Par_potential[Par_type == 1]

        Par_metrics_t0 = np.stack((Par_nonaffine_um_t0, Par_temperature_t0, Par_D2min_t0, Par_shear_strain_t0), axis=1)
        time_corr = [[] for i in range(len(frame_interval))]
        for jdx, frame_shift in enumerate(frame_interval):

            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # 2.1 Particle nonaffine measures at γ+δγ
            #
            particle_info = open(dynamics_path + '/Particle dynamics-' + str(frame + frame_shift) + '.dump', 'r')
            alllines = particle_info.readlines()
            lines = alllines[9:]
            particle_info.close()
            lines = [line for line in lines if line != '\n']  # deleting entries while iterating over the list would skip lines
            Par_id                = np.array([int(line.strip().split(' ')[0]) for line in lines])     # whitespace-separated fields; column 1
            Par_type              = np.array([int(line.strip().split(' ')[1]) for line in lines])     # column 2
            Par_radius            = np.array([float(line.strip().split(' ')[2]) for line in lines])   # column 3
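            # Hedged alternative (an assumption): the whitespace-separated
            # columns above can be read in one call; np.loadtxt skips blank
            # lines automatically, so the manual filtering is unnecessary:
            #   data = np.loadtxt(dynamics_path + '/Particle dynamics-'
            #                     + str(frame + frame_shift) + '.dump', skiprows=9)
            #   Par_id, Par_type = data[:, 0].astype(int), data[:, 1].astype(int)
            #   Par_radius = data[:, 2]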
Beispiel #49
0
def main(_):
    bert_config = modeling.BertConfig.from_json_file(config_dict[FLAGS.model_size])

    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length %d because the BERT model "
            "was only trained up to sequence length %d" %
            (FLAGS.max_seq_length, bert_config.max_position_embeddings))

    tpu_cluster_resolver = None
    if use_tpu:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2

    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        keep_checkpoint_max=1,
        model_dir=FLAGS.output_path,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=iterations_per_loop,
            num_shards=num_tpu_cores,
            per_host_input_for_training=is_per_host))

    model_fn = model_fn_builder(
        bert_config=bert_config,
        num_labels=2,
        init_checkpoint=init_checkpoint,
        use_tpu=use_tpu,
        use_one_hot_embeddings=use_tpu)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=use_tpu,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=FLAGS.batch_size,
        eval_batch_size=FLAGS.batch_size,
        predict_batch_size=FLAGS.batch_size,
        params={"qc_scores": "qc_scores"})

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Batch size = %d", FLAGS.batch_size)

    for split in ["valid", "test"]:

        maxp_run = load_run(os.path.join(FLAGS.first_model_path, "{}_{}_result.trec".format(FLAGS.dataset, split)))

        query_docids_map = []
        data_path = os.path.join(FLAGS.output_path, "rerank-{0}_kc-{1}".format(FLAGS.rerank_num, FLAGS.kc), "data")
        result_path = os.path.join(FLAGS.output_path, "rerank-{0}_kc-{1}".format(FLAGS.rerank_num, FLAGS.kc), "result")
        if not tf.gfile.Exists(result_path):
            tf.gfile.MakeDirs(result_path)

        with tf.gfile.Open(os.path.join(data_path, "chunk_passage_ids_{0}.txt".format(split))) as ref_file:
            for line in ref_file:
                query_docids_map.append(line.strip().split("\t"))

        predict_input_fn = input_fn_builder(
            dataset_path=os.path.join(data_path, "chunk_passage_{0}.tf".format(split)),
            is_training=False,
            seq_length=FLAGS.max_seq_length,
            drop_remainder=False)

        total_count = 0

        result_file = tf.gfile.Open(os.path.join(result_path, "{0}_{1}_result.trec".format(FLAGS.dataset, split)), 'w')

        ckpt = tf.train.latest_checkpoint(checkpoint_dir=FLAGS.third_model_path)
        print("use latest ckpt: {0}".format(ckpt))

        result = estimator.predict(input_fn=predict_input_fn,
                                   yield_single_examples=True,
                                   checkpoint_path=ckpt)

        start_time = time.time()
        results = []
        result_dict = collections.OrderedDict()
        for item in result:

            results.append((item["qc_scores"], item["probs"]))
            total_count += 1

            if total_count == len(query_docids_map) or query_docids_map[total_count][0] != \
                    query_docids_map[total_count - 1][0]:

                chunk_num = len(results) // FLAGS.rerank_num
                assert chunk_num <= FLAGS.kc

                qc_scores, probs = list(zip(*results))
                qc_scores = np.stack(qc_scores)
                cp_scores = np.stack(probs)[:, 1]

                qc_scores = np.reshape(qc_scores, [FLAGS.rerank_num, chunk_num])
                cp_scores = np.reshape(cp_scores, [FLAGS.rerank_num, chunk_num])

                # softmax normalization
                qc_scores = softmax(qc_scores, axis=-1)

                scores = np.sum(np.multiply(qc_scores, cp_scores), axis=-1, keepdims=False)

                start_idx = total_count - FLAGS.rerank_num * chunk_num
                end_idx = total_count
                query_ids, chunk_ids, passage_ids, labels, qc_scores = zip(*query_docids_map[start_idx:end_idx])
                assert len(set(query_ids)) == 1, "Query ids must be all the same."
                query_id = query_ids[0]

                candidate_docs = list()
                for pid in passage_ids:
                    doc_id = pid.split("_")[0]
                    if doc_id not in candidate_docs:
                        candidate_docs.append(doc_id)

                result_dict[query_id] = dict()

                for i, doc in enumerate(candidate_docs):
                    result_dict[query_id][doc] = scores[i]

                rerank_list = sorted(result_dict[query_id].items(), key=lambda x: x[1], reverse=True)

                last_score = rerank_list[-1][1]
                for doc in maxp_run[query_id][FLAGS.rerank_num:]:
                    current_score = last_score - 0.01
                    result_dict[query_id][doc] = current_score
                    last_score = current_score

                ranking_list = sorted(result_dict[query_id].items(), key=lambda x: x[1], reverse=True)

                for rank, (doc_id, score) in enumerate(ranking_list):
                    result_file.write(
                        "\t".join([query_id, "Q0", doc_id, str(rank + 1), str(score), "chunk_passage_PRF"]) + "\n")

                results = []

            if total_count % 1000 == 0:
                tf.logging.warn("Read {} examples in {} secs".format(
                    total_count, int(time.time() - start_time)))

        result_file.close()
        tf.logging.info("Done Evaluating!")
Beispiel #50
0
img_data = []
for img in users_df.index.values:
    input_img = cv2.imread(data_path + '/' + str(img)+'.jpg')
    input_img_resize = cv2.resize(input_img, (100, 100))
    img_data.append(input_img_resize)

print('loaded photos')
img_data = np.array(img_data)
# users_df = users_df[np.isin(users_df.index.values, ids)]
# %%
# feature_vectors =
# feature_vectors = np.loadtxt('feature_vectors_400_samples.txt')
# print("feature_vectors_shape:", feature_vectors.shape)
# print("num of images:", feature_vectors.shape[0])
# print("size of individual feature vector:", feature_vectors.shape[1])
feature_vectors = np.stack(users_df['vgg_face'])
features = tf.Variable(feature_vectors, name='features')

# Taken from: https://github.com/tensorflow/tensorflow/issues/6322
def images_to_sprite(data):
    """Creates the sprite image along with any necessary padding

    Args:
      data: NxHxW[x3] tensor containing the images.

    Returns:
      data: Properly shaped HxWx3 image with any necessary padding.
    """
    if len(data.shape) == 3:
        data = np.tile(data[..., np.newaxis], (1, 1, 1, 3))
    # data = data.astype(np.float32)
    
    #filters out just one chanel of the 3, the others are not used
    Chosen, garbagelevel, garbagelevel2 = cv2.split(img)
    
    #converts the single channel image into a numpy array
    CurrentImage = asarray(Chosen)
    
    #appends the array to the list initated before loop
    arraylist.append(CurrentImage)
    
    #prints shape of array to ensure that all images are the same size as program is running
    print(CurrentImage.shape)
  
    
#once loop is complete this function stacks all of the images within the list
DataSet = np.stack(arraylist,axis = 2)
#this prints the shape of the entire dataset to be saved
print(DataSet.shape)
#saves as a compressed .npz file 
savez_compressed('P:/Python Projects/MIT/MasonIsAnImageSavingGenius.npz', DataSet)

'''
#this was a test to ensure that the saved images can be opened and all look good

#loads data from file that was just saved
loaddata =load('P:/Python Projects/MIT/MasonIsAnImageSavingGenius.npz')

#Because noname was given in the savez_compressed line this is the auto one to ensure propper data load
DataArray = loaddata['arr_0']

Beispiel #52
0
def evaluation_preproc(model_output, args={}):
    graph_loader = args['graph_loader']
    heat = args.get('heat', 100.)
    edge_thresh = args.get('edge_thresh', None)
    heat_thresh = args.get('heat_thresh', None)
    heat_thresh_is_relative = args.get('heat_thresh_is_relative', True)
    verbose_rate = args.get('verbose_rate', None)
    verbose = args.get('verbose', False)
    topk = args.get('top_k', 1)
    match_zero = args.get('match_zero', False)  # must be False (asserted below)
    add_full_proposal = args.get('add_full_proposal', False)
    num_proposals = args.get('num_proposals', None)
    gt_entities = args.get('gt_entities', False)

    freq_prior_mtx = args.get('freq_prior', None)
    freq_prior_weight = args.get('freq_prior_weight', 0.0)
    filter_nonoverlap = args.get('filter_nonoverlap', False)
    self_confidence = args.get('self_confidence', False)
    rank_triplet_confidence = args.get('rank_triplet_confidence', True)

    assert (not match_zero)
    noun_prior = args.get('noun_prior', [1.0])
    verb_prior = args.get('verb_prior', [1.0])

    # reading output graph
    pred_conf = None
    image_id = model_output['image_id']
    ent_prob = [
        item[:n] for item, n in zip(model_output['out_ent_prob'],
                                    model_output['out_num_ent'])
    ]
    pred_prob = [
        item[:n] for item, n in zip(model_output['out_pred_prob'],
                                    model_output['out_num_pred'])
    ]
    if self_confidence:
        pred_conf = [
            item[:n] for item, n in zip(model_output['out_pred_conf'],
                                        model_output['out_num_pred'])
        ]
    pred_roles = [
        item[:, :np, :ne] for item, np, ne in zip(
            model_output['out_pred_roles'], model_output['out_num_pred'],
            model_output['out_num_ent'])
    ]

    # Sparsifying the graph
    pred_roles_idx = []
    for i in range(len(pred_roles)):
        #pred_roles[i] = pred_roles[i] >= pred_roles[i].max(axis=-1, keepdims=True) if edge_thresh is None else edge_thresh
        if edge_thresh is None:
            max_idx = np.argmax(pred_roles[i], axis=-1)
            pred_roles[i] = np.zeros(pred_roles[i].shape, dtype='bool')
            pr_idx = np.zeros((pred_roles[i].shape[1], pred_roles[i].shape[0]),
                              dtype=np.int32)
            for role in range(max_idx.shape[0]):
                for j in range(max_idx.shape[1]):
                    pred_roles[i][role, j, max_idx[role, j]] = True
                    pr_idx[j, role] = max_idx[role, j]
            pred_roles_idx.append(pr_idx)
        else:
            pred_roles[i] = pred_roles[i] >= edge_thresh

    ent_box = []
    for i in range(len(image_id)):
        prop = np.copy(args['proposals'][image_id[i]])
        ent_box.append(prop)
    num_prop = max([prop.shape[0] for prop in ent_box])
    box_array = np.zeros((len(image_id), num_prop, 4))
    for i, box in enumerate(ent_box):
        box_array[i, :box.shape[0]] = box
    ent_box = list(box_array)

    if verbose:
        print('Classifying embeddings...')

    # Fetching the ground truth graph
    idx = np.asarray([graph_loader.imgid2idx[i] for i in image_id])
    gt_graph = graph_loader.get_gt_batch(idx, pack=False)

    # Classifying nodes
    ent_lbl = []
    pred_lbl = []
    ent_score = []
    pred_score = []
    for i in range(len(image_id)):
        if gt_entities:
            nouns_label = np.copy(gt_graph['ent_lbl'][i]).reshape(
                (gt_graph['ent_lbl'][i].shape[0], 1))
            nouns_score = np.ones((gt_graph['ent_lbl'][i].shape[0], ))
        else:
            if topk > 1:
                nouns_label = np.argsort(-ent_prob[i], axis=-1)[:, :topk]
            else:
                nouns_label = np.argmax(ent_prob[i], axis=-1)[:, np.newaxis]
            nouns_score = np.asarray(
                [[ent_prob[i][j, nouns_label[j, k]] for k in range(topk)]
                 for j in range(nouns_label.shape[0])])
            nouns_score = np.sum(nouns_score, axis=-1)

        # Late-fusion with frequency prior
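        # i.e. pred_dist = (1 - w) * model_prob + w * freq_prior[subject_label, object_label],
        # with w = freq_prior_weight.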
        if freq_prior_weight > 0.0:
            n = nouns_label.shape[0]
            pred_dist = np.zeros((n * n, pred_prob[i].shape[1]))
            pred_dist[pred_roles_idx[i][:, 0] * n +
                      pred_roles_idx[i][:, 1]] = (
                          1 - freq_prior_weight) * pred_prob[i]
            temp = np.tile(np.arange(n)[:, np.newaxis], (1, n))
            pred_roles_idx[i] = np.stack((temp, temp.T), axis=-1).reshape(
                (n * n, 2))
            pred_dist += freq_prior_weight * freq_prior_mtx[
                nouns_label[pred_roles_idx[i][:, 0], 0],
                nouns_label[pred_roles_idx[i][:, 1], 0]]
            pred_roles[i] = np.eye(n, dtype=bool)[pred_roles_idx[i].T, :]
        else:
            pred_dist = pred_prob[i]

        if topk > 1:
            preds_label = np.argsort(-pred_dist, axis=-1)[:, :topk]
        else:
            preds_label = np.argmax(pred_dist, axis=-1)[:, np.newaxis]
        if self_confidence:
            nouns_score = np.ones((nouns_label.shape[0], ))
            preds_score = np.copy(pred_conf[i])
        else:
            preds_score = np.asarray(
                [[pred_dist[j, preds_label[j, k]] for k in range(topk)]
                 for j in range(preds_label.shape[0])])
            preds_score = np.sum(preds_score, axis=-1)

        if filter_nonoverlap:
            overlap = (pw_iou(ent_box[i], ent_box[i]) > 0.0).astype(np.float32)
            preds_score *= overlap[pred_roles_idx[i][:, 0],
                                   pred_roles_idx[i][:, 1]]

        ent_lbl.append(nouns_label)
        pred_lbl.append(preds_label)
        ent_score.append(nouns_score)
        pred_score.append(preds_score)

    # sorting ...
    for i in range(len(image_id)):
        ts = np.copy(pred_score[i])
        if rank_triplet_confidence:
            for r in range(pred_roles[i].shape[0]):
                ts *= np.matmul(pred_roles[i][r], ent_score[i])
        sort_idx_pred = np.argsort(-ts)
        pred_lbl[i] = pred_lbl[i][sort_idx_pred]
        pred_score[i] = pred_score[i][sort_idx_pred]
        pred_roles[i] = pred_roles[i][:, sort_idx_pred, :]

    # Here is the final detected graph
    det_graph = {
        'ent_lbl': ent_lbl,
        'ent_score': ent_score,
        'ent_box': ent_box,
        'pred_lbl': pred_lbl,
        'pred_score': pred_score,
        'pred_roles': pred_roles,
    }

    return det_graph, gt_graph
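
# A minimal usage sketch (hypothetical variable names; the graph loader and the
# proposal boxes come from the surrounding evaluation pipeline, not shown here):
# det_graph, gt_graph = evaluation_preproc(model_output, args={
#     'graph_loader': graph_loader,
#     'proposals': proposals,   # dict: image_id -> [num_boxes, 4] array
#     'match_zero': False,      # required by the assert at the top
# })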
Example #53
def get_stacked_images(self):
    return np.stack(self.images, axis=2)
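    # e.g. stacking N equally sized [H, W] frames yields one [H, W, N] array
    # (assuming self.images holds 2-D arrays of identical shape)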
Example #54
import numpy as np

# KITTI uses IoU thresholds for ground-truth matching; each threshold level array below
# has shape [num_difficulties, num_classes]. KITTI uses three difficulty levels by default.
KITTI_OVERLAP_MODERATE = np.array([[0.5, 0.7, 0.7, 0.5, 0.5, 0.7, 0.5], [0.5, 0.7, 0.7, 0.5, 0.5, 0.7, 0.5],
                                   [0.5, 0.7, 0.7, 0.5, 0.5, 0.7, 0.5]])
KITTI_OVERLAP_EASY_2D = np.array([[0.5, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5], [0.5, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5],
                                  [0.5, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5]])
KITTI_OVERLAP_EASY_BEV = np.array([[0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5], [0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5],
                                   [0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5]])
KITTI_OVERLAP_EASY_3D = np.array([[0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5], [0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5],
                                  [0.25, 0.5, 0.5, 0.25, 0.25, 0.5, 0.5]])

# Stack the two threshold levels for each metric into one array:
# [2, 3, 7] -> [Thresholds, Difficulties, Classes]
KITTI_OVERLAPs_2D = np.stack([KITTI_OVERLAP_MODERATE, KITTI_OVERLAP_EASY_2D], axis=0)
KITTI_OVERLAPs_BEV = np.stack([KITTI_OVERLAP_MODERATE, KITTI_OVERLAP_EASY_BEV], axis=0)
KITTI_OVERLAPs_3D = np.stack([KITTI_OVERLAP_MODERATE, KITTI_OVERLAP_EASY_3D], axis=0)

# Create threshold array by combining subarrays for each metric. [4, 2, 3, 7] -> [Metric_types, Thresholds,
# Difficulties, Classes]
KITTI_OVERLAP_THRESHOLDS = np.stack([KITTI_OVERLAPs_2D, KITTI_OVERLAPs_BEV, KITTI_OVERLAPs_3D, KITTI_OVERLAPs_3D], axis=0)
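# e.g. KITTI_OVERLAP_THRESHOLDS[2, 0, 1, 1] is the moderate-threshold 3D IoU value
# for difficulty 1, class index 1 (0.7 with the arrays above).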


# NuScenes uses distance thresholds for ground-truth matching; each threshold level
# array below has shape [num_difficulties, num_classes].
NU_OVERLAP_MODERATE = np.array([[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                                [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]])

NU_OVERLAP_EASY = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]])
Example #55
def compute_AFS_descriptors(configurations,
                            n_max,
                            l_max,
                            r_cut,
                            dimensions,
                            radial_function_type='g_function',
                            reg_eigenvalues=0.,
                            neighbors_in_r_cut=False,
                            radial_tensor_product=False):
    """Implementation of the formula given in section III.G (page 9).
       The indices i and i' in the sum are interpreted as the neighbor indices of a central atom.
       If neighbors_in_r_cut=True, we add the constraint that the neighbors i and i' must be at a distance <= r_cut from each other.
    """
    assert radial_function_type in [
        'g_function', 'gaussian'
    ], f'invalid radial function type {radial_function_type}'

    l_values = np.arange(l_max + 1).reshape(1, 1, -1)

    if radial_function_type == 'g_function':
        W_matrix = compute_W_matrix(n_max, reg=reg_eigenvalues)
        alphas = np.arange(1, n_max + 1).astype('float64').reshape((1, -1))
        exponents = alphas + 2
        normalizing_constants = np.sqrt(2 * alphas + 5) / np.power(
            r_cut, alphas + 2.5)
    elif radial_function_type == 'gaussian':
        centers = np.linspace(0, r_cut, n_max, endpoint=False).reshape((1, -1))
        sigma = 0.5 * centers[0, 1]

    if radial_tensor_product:
        AFS_descriptors = np.zeros(
            (configurations.shape[0], configurations.shape[1], n_max**2,
             l_max + 1))
    else:
        AFS_descriptors = np.zeros((configurations.shape[0],
                                    configurations.shape[1], n_max, l_max + 1))

    for i_config in tqdm(range(configurations.shape[0])):
        configuration = configurations[i_config]
        periodized_configuration, initial_atom_ids = periodize_configuration(
            configuration, r_cut, dimensions)
        point_tree = spatial.cKDTree(periodized_configuration)

        for i_atom in range(configuration.shape[0]):
            atom = configuration[i_atom:i_atom + 1]
            neighbors_indices = [
                n_id for n_id in point_tree.query_ball_point(
                    configuration[i_atom], r_cut)
                if initial_atom_ids[n_id] != i_atom
            ]
            neighbors = periodized_configuration[neighbors_indices]
            r_vectors = neighbors - atom
            r_norms = np.linalg.norm(r_vectors, axis=1, keepdims=True)
            if radial_function_type == 'g_function':
                phi_functions = normalizing_constants * (r_cut -
                                                         r_norms)**exponents
                radial_functions = np.dot(phi_functions, W_matrix)
            elif radial_function_type == 'gaussian':
                radial_functions = gaussian(r_norms, centers, sigma)

            r_normalized = r_vectors / r_norms
            cos_angles = np.dot(r_normalized, r_normalized.transpose())

            # triangular-upper mask corresponding to pairs (i,j) with i<j, i.e. pair of different atoms
            n_neighbors = neighbors.shape[0]
            triu_mask = np.arange(n_neighbors)[:, None] < np.arange(n_neighbors)
            if neighbors_in_r_cut:
                neighbors_indices_pdist_matrix = spatial.distance.squareform(
                    spatial.distance.pdist(neighbors))
                neighbors_in_r_cut_mask = neighbors_indices_pdist_matrix < r_cut
                triu_mask *= neighbors_in_r_cut_mask

            cos_angles = np.clip(cos_angles, -1, 1)
            angles_triu = np.arccos(cos_angles[triu_mask]).reshape(-1, 1, 1)

            cos_l_angles_triu = np.cos(l_values * angles_triu)

            if radial_tensor_product:
                radial_functions_product_triu = np.tensordot(
                    radial_functions.transpose(), radial_functions,
                    axes=0).transpose(1, 2, 0, 3)[triu_mask, :, :].reshape(
                        -1, n_max**2, 1)
            else:
                radial_functions_product_triu = np.stack(
                    [np.dot(radial_functions[:, n:n + 1],
                            radial_functions[:, n:n + 1].transpose())[triu_mask]
                     for n in range(n_max)],
                    axis=-1)[:, :, np.newaxis]

            AFS_descriptors[i_config,
                            i_atom] += (radial_functions_product_triu *
                                        cos_l_angles_triu).sum(axis=0)

    return AFS_descriptors
Example #56
def _handler_rgb_l1(ir_path, vis_path, model_path, model_pre_path, ssim_weight, index, output_path=None):
	# ir_img = get_train_images(ir_path, flag=False)
	# vis_img = get_train_images(vis_path, flag=False)
	ir_img = get_test_image_rgb(ir_path, flag=False)
	vis_img = get_test_image_rgb(vis_path, flag=False)
	dimension = ir_img.shape

	ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
	vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])

	#ir_img = np.transpose(ir_img, (0, 2, 1, 3))
	#vis_img = np.transpose(vis_img, (0, 2, 1, 3))

	ir_img1 = ir_img[:, :, :, 0]
	ir_img1 = ir_img1.reshape([1, dimension[0], dimension[1], 1])
	ir_img2 = ir_img[:, :, :, 1]
	ir_img2 = ir_img2.reshape([1, dimension[0], dimension[1], 1])
	ir_img3 = ir_img[:, :, :, 2]
	ir_img3 = ir_img3.reshape([1, dimension[0], dimension[1], 1])

	vis_img1 = vis_img[:, :, :, 0]
	vis_img1 = vis_img1.reshape([1, dimension[0], dimension[1], 1])
	vis_img2 = vis_img[:, :, :, 1]
	vis_img2 = vis_img2.reshape([1, dimension[0], dimension[1], 1])
	vis_img3 = vis_img[:, :, :, 2]
	vis_img3 = vis_img3.reshape([1, dimension[0], dimension[1], 1])
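	# Each RGB channel is fused independently below and the three outputs are re-stacked at the end.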

	print('img shape final:', ir_img1.shape)

	with tf.Graph().as_default(), tf.Session() as sess:
		infrared_field = tf.placeholder(
			tf.float32, shape=ir_img1.shape, name='content')
		visible_field = tf.placeholder(
			tf.float32, shape=ir_img1.shape, name='style')

		dfn = DenseFuseNet(model_pre_path)

		enc_ir = dfn.transform_encoder(infrared_field)
		enc_vis = dfn.transform_encoder(visible_field)

		target = tf.placeholder(
			tf.float32, shape=enc_ir.shape, name='target')

		output_image = dfn.transform_decoder(target)

		# restore the trained model and run the style transferring
		saver = tf.train.Saver()
		saver.restore(sess, model_path)

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img1, visible_field: vis_img1})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output1 = sess.run(output_image, feed_dict={target: feature})

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img2, visible_field: vis_img2})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output2 = sess.run(output_image, feed_dict={target: feature})

		enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis], feed_dict={infrared_field: ir_img3, visible_field: vis_img3})
		feature = L1_norm(enc_ir_temp, enc_vis_temp)
		output3 = sess.run(output_image, feed_dict={target: feature})

		output1 = output1.reshape([1, dimension[0], dimension[1]])
		output2 = output2.reshape([1, dimension[0], dimension[1]])
		output3 = output3.reshape([1, dimension[0], dimension[1]])

		output = np.stack((output1, output2, output3), axis=-1)
		#output = np.transpose(output, (0, 2, 1, 3))
		save_images(ir_path, output, output_path,
		            prefix='fused' + str(index), suffix='_densefuse_l1norm_'+str(ssim_weight))
Example #57
from pathlib import Path

import numpy as np
import torch

# EggLayingDetector and SnippetsFullFlow come from the surrounding project.
nms_threshold_abs = 0.0
nms_threshold_rel = 0.25
loss_type = 'maxlikelihood'
model_type = 'clf+unet-simple'
n_ch_in = 1
n_classes = 2

#%%
model = EggLayingDetector(n_ch_in, n_classes)
model.eval()

#%%
root_dir = Path.home() / 'workspace/WormData/egg_laying/data/v1_0.5x/test'
gen = SnippetsFullFlow(root_dir)

#%%
batch = []
for _ in range(4):
    snippet, is_egg_laying = gen[0]
    batch.append((snippet, is_egg_laying))

#%%
snippet, is_egg_laying = zip(*batch)
X = torch.from_numpy(np.stack(snippet))
y = torch.from_numpy(np.stack(is_egg_laying))

#%%
xout = model(X)
Example #58
def gauss_sample(data, decay_chain, r_name, sigma, sample_N, dat_order):
    sigma_delta = 5

    def gauss(delta_x):
        return tf.exp(-(delta_x**2) / (2 * sigma**2))
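    # gauss() is an unnormalized Gaussian kernel exp(-dx^2 / (2 * sigma^2));
    # the resulting weights are normalized further down with tf.reduce_sum.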

    angle = cal_helicity_angle(data["particle"],
                               decay_chain.standard_topology())
    tp_map = decay_chain.topology_map()

    r_particle = tp_map[get_particle(r_name)]

    for i in decay_chain.standard_topology():
        if i.core == r_particle:
            m_min = sum(data["particle"][j]["m"] for j in i.outs)
        print(i.outs, r_particle)
        if any(r_particle == j for j in i.outs):
            m_max = (data["particle"][i.core]["m"] -
                     sum(data["particle"][j]["m"]
                         for j in i.outs) + data["particle"][r_particle]["m"])

    print("min, max: ", m_min, m_max)
    mass = {}
    weights = []
    for i in data["particle"]:
        mi = data["particle"][i]["m"]
        if i == r_particle:
            delta_min = tf.where(
                m_min - mi > -sigma_delta * sigma,
                m_min - mi,
                -sigma_delta * sigma,
            )
            delta_max = tf.where(
                m_max - mi > sigma_delta * sigma,
                sigma_delta * sigma,
                m_max - mi,
            )
            delta_m = (delta_max - delta_min) / (sample_N + 1)
            print("delta_min:", delta_min)
            min_m = mi + delta_min + delta_m / 2
            mi_s = []
            for j in range(sample_N):
                mi_s_i = min_m + delta_m * j
                mi_s.append(mi_s_i)
                weights.append(gauss(mi_s_i - mi))
            mass[i] = tf.stack(mi_s)
        else:
            mass[i] = mi[None, :]

    # print(mass[r_particle], np.mean(mass[r_particle]))

    weights = tf.stack(weights)
    weights = weights / tf.reduce_sum(weights, axis=0)
    data_weights = data.get("weight", tf.ones_like(weights))

    total_weights = weights * data_weights

    print({k: v.shape for k, v in mass.items()})

    mask = True
    p4_all = {}
    for i in decay_chain:
        phi = angle[tp_map[i]][tp_map[i.outs[0]]]["ang"]["alpha"]
        theta = angle[tp_map[i]][tp_map[i.outs[0]]]["ang"]["beta"]

        m0 = mass[tp_map[i.core]]
        m1 = mass[tp_map[i.outs[0]]]
        m2 = mass[tp_map[i.outs[1]]]

        p_square = get_relative_p2(m0, m1, m2)

        print(m0.shape, m1.shape, m2.shape, p_square.shape)

        p = tf.sqrt(tf.where(p_square > 0, p_square, 0))
        pz = p * tf.cos(theta)
        px = p * tf.sin(theta) * tf.cos(phi)
        py = p * tf.sin(theta) * tf.sin(phi)
        E1 = tf.sqrt(m1 * m1 + p * p)
        E2 = tf.sqrt(m2 * m2 + p * p)
        p1 = tf.stack([E1, px, py, pz], axis=-1)
        p2 = tf.stack([E2, -px, -py, -pz], axis=-1)
        p4_all[i.outs[0]] = p1
        p4_all[i.outs[1]] = p2

    print("p shape", {k: v.shape for k, v in p4_all.items()})

    core_boost = {}
    for i in decay_chain:
        if i.core != decay_chain.top:
            core_boost[i.outs[0]] = i.core
            core_boost[i.outs[1]] = i.core
    ret = {}
    for i in decay_chain.outs:
        tmp = i
        ret[i] = p4_all[i]
        while tmp in core_boost:
            tmp = core_boost[tmp]
            # print(i, tmp)
            print(tmp)
            ret[i] = lv.rest_vector(lv.neg(p4_all[tmp]), ret[i])

    ret2 = {}
    mask = tf.expand_dims(mask, -1)
    for i in ret:
        ret2[i] = tf.where(mask, ret[i], data["particle"][tp_map[i]]["p"])

    print("ret2:", {k: v.shape for k, v in ret2.items()})
    # print({i: data["particle"][tp_map[i]]["p"] for i in decay_chain.outs})

    pi = np.stack([ret2[i] for i in dat_order], axis=-2)
    pi = np.transpose(pi, (1, 0, 2, 3))
    total_weights = np.transpose(total_weights.numpy(), (1, 0))
    print(pi.shape)
    return pi, total_weights
Example #59
        frame_stitch[y_start:y_end, x_start:x_end, :] = face_stitch

        # Display the images
        cv2.rectangle(frame, (face_x, face_y),
                      (face_x + face_w, face_y + face_h), (0, 255, 0), 2)
        frame = cv2.resize(frame, (512, 512), interpolation=cv2.INTER_AREA)
        frame_stitch = cv2.resize(frame_stitch, (512, 512),
                                  interpolation=cv2.INTER_AREA)
        face_patch_crop = cv2.resize(face_patch_crop, (512, 512),
                                     interpolation=cv2.INTER_AREA)
        frame_hat = cv2.resize(frame_hat, (512, 512),
                               interpolation=cv2.INTER_AREA)

    else:

        frame = cv2.resize(frame, (512, 512), interpolation=cv2.INTER_AREA)
        frame_stitch = np.zeros((512, 512, 3))
        face_patch_crop = np.zeros((512, 512, 3))
        frame_hat = np.zeros((512, 512, 3))

    figure1 = np.stack([frame, frame_stitch], axis=0)
    figure2 = np.stack([face_patch_crop, frame_hat], axis=0)
    figure = np.stack([figure1, figure2], axis=1)
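    # 'figure' is now a [2, 2, 512, 512, 3] grid of panels; stack_images presumably
    # tiles it into one displayable image.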
    figure = stack_images(figure)

    cv2.imshow('frame', figure)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #60
print("[INFO] Loaded model '{}' from disk".format(model_name))

print("[INFO] Predicting and generating submission file")

testingDataset = glob(os.path.join(test_path,'*.tif'))
submissionDataframe = pd.DataFrame()
testingDatasetSize = len(testingDataset)
print("[INFO] Predicting on '{}' data".format(testingDatasetSize))

testing_batch_size = 32
for index in range(0, testingDatasetSize, testing_batch_size):
    print("[INFO] Predicting on batch: %i - %i"%(index, index+testing_batch_size))
    df = pd.DataFrame({'path': testingDataset[index:index+testing_batch_size]})
    df['id'] = df.path.map(lambda x: os.path.basename(x).split(".")[0])
    df['image'] = df['path'].map(imread)
    stack = np.stack(df.image, axis=0)
    predictions = [loaded_model.predict(np.expand_dims(item / 255.0, axis=0))[0][0] for item in stack]
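    # Note: a single batched call, loaded_model.predict(stack / 255.0),
    # would avoid this per-image Python loop.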
    df['label'] = predictions
    submissionDataframe = pd.concat([submissionDataframe, df[['id', 'label']]])

print("[INFO] Generating submission file")

submissionFilePath = os.path.sep.join([submission_file_path, ("submission__" + strftime("%Y-%m-%d_%H-%M-%S", gmtime()) + ".csv")])
submissionDataframe.to_csv(submissionFilePath, index=False, header=True)

# Clear RAM to make some free space.
gc.collect()

print("[INFO] Done")