Code Example #1
File: hello.py  Project: nagasudhindra/python
def main():
    # print("Hello ", sys.argv[1])
    print("Square of 9: ", power(9, 2))
    print("Docstring of power: ", power.__doc__)
    print("power type: ", type(power))
    print(repeat("Python", 3))
    print(repeat("-", 100))
Code Example #2
    def __init__(self,
                 name,
                 is_small=False,
                 is_mini=False,
                 longer_repeat=5,
                 logger=None):
        logger = logger or logging.getLogger(__name__)

        name = name.lower()
        lookup_tables_dir_path = os.path.join(dir_path, name2dir[name])
        if not os.path.isdir(lookup_tables_dir_path):
            raise NotImplementedError(
                "Folder at {} does not exist".format(lookup_tables_dir_path))

        generation_arguments_path = os.path.join(lookup_tables_dir_path,
                                                 'generation_arguments.txt')
        if not os.path.isfile(generation_arguments_path):
            raise NotImplementedError(
                "Generation Arguments .txt Missing in Table Lookup Folder \
                - Cannot Generate Table")

        lookup_tables_data_dir_path = os.path.join(lookup_tables_dir_path,
                                                   "data")

        if not os.path.isdir(lookup_tables_data_dir_path):
            logger.info(
                "Data not present for {} \n Generating Dataset".format(name))
            make_long_lookup_tables(lookup_tables_data_dir_path,
                                    generation_arguments_path)

        # Get default params from json
        # - these are not required but provide recommended default values
        default_params = get_default_params(lookup_tables_dir_path)

        # Update the default params if the task is small / mini
        if default_params is not None:
            if is_small:
                default_params["task_defaults"]["k"] = 1
            if is_mini:
                default_params["task_defaults"]["k"] = 1
                default_params["task_defaults"]["batch_size"] = 128
                default_params["task_defaults"]["patience"] = 2
                default_params["task_defaults"]["epochs"] = 3
                default_params["task_defaults"]["n_attn_plots"] = 1

        train_file = "train"
        valid_file = "validation"
        test_files = flatten([
            "heldout_inputs", "heldout_compositions", "heldout_tables",
            "new_compositions",
            repeat("longer_seen", longer_repeat),
            repeat("longer_incremental", longer_repeat),
            repeat("longer_new", longer_repeat)
        ])

        super().__init__(name, lookup_tables_data_dir_path, train_file,
                         valid_file, test_files, default_params)
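Both `flatten` and `repeat` are project-local helpers that the snippet does not define. A sketch consistent with how they are called above, assuming `repeat` simply duplicates the test-set name (the real helper may instead append an index suffix):

def repeat(name, n):
    # Assumed behaviour: n copies of the test-set name.
    return [name] * n

def flatten(items):
    # Assumed behaviour: splice nested lists into a single flat list of names.
    flat = []
    for item in items:
        flat.extend(item if isinstance(item, list) else [item])
    return flat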
Code Example #3
    def test_move_randomly_moves_one_tile_only(self):
        def single_run():
            start_pos = self.enemy.position
            self.enemy.move_randomly()
            diff = tuple_diff(self.enemy.position, start_pos)

            valid_diffs = { -1, 1, 0 }
            assert diff[0] in valid_diffs and diff[1] in valid_diffs

        repeat(single_run, self.RUNS)
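`repeat` here only has to call the test body a fixed number of times; a minimal stand-in with that behaviour (an assumption, not the project's definition):

def repeat(func, times):
    # Call `func` with no arguments `times` times, discarding return values.
    for _ in range(times):
        func()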
Code Example #4
    def do_triplet(embeddings, nexamples, ninstances):
        """Applies the triplet loss to the given embeddings."""
        # Empirically, normalizing the embeddings is more robust.
        embeddings = tf.nn.l2_normalize(embeddings, axis=-1)

        # Generate the labels as [0 0 0 1 1 1 ...]
        labels = utils.repeat(tf.range(ninstances), nexamples)

        # Apply batch-hard loss with a soft-margin.
        losses_tri = batch_hard(embeddings,
                                labels,
                                margin=0.0,
                                soft=True,
                                sample_pos=False,
                                sample_neg=False)
        return tf.reduce_mean(losses_tri)
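`utils.repeat` is applied like `np.repeat`/`tf.repeat` with a scalar count: every instance id is repeated once per example, giving labels of the form [0 0 0 1 1 1 ...]. A small NumPy illustration of that layout (for reference only):

import numpy as np

labels = np.repeat(np.arange(3), 2)
print(labels)  # [0 0 1 1 2 2]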
Code Example #5
if has_unknown_orgs:
  print('Had unknown orgs, stopping')
  sys.exit(1)


orgs, venues, events = {}, {}, []

def _getOrganizersAndEvents(org_id):
  global events, orgs
  org = utils.eventbriteApi('organizers/%d/' % org_id)
  orgs[org_id] = org

  org_events = utils.eventbriteApi(
    'organizers/%d/events/?start_date.range_start=2010-01-01T00:00:00&status=all' % org_id)
  events += [e for e in org_events['events'] if 'venue_id' in e and e['venue_id'] is not None]
utils.repeat(included_organizers, 'Fetching organization data for %d', _getOrganizersAndEvents)


def _getVenueInfo(venue_id):
  global venues
  venue = utils.eventbriteApi('venues/%d/' % venue_id)
  # some organizations do events world-wide, not in RO only
  if venue['address']['country'] != 'RO': return
  venues[venue_id] = venue
unique_venues = frozenset(int(e['venue_id']) for e in events)
utils.repeat(unique_venues, 'Fetching venue information for %d', _getVenueInfo)


# filter out events not from RO
events = [e for e in events if int(e['venue_id']) in venues]
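`utils.repeat(items, message, fn)` acts as the script's loop-with-progress helper; based on the call sites above, a minimal sketch (assumed, not the actual utils module) would be:

def repeat(items, message, fn):
    # Apply `fn` to every item, logging a progress message for each one.
    for item in items:
        print(message % item)
        fn(item)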
Code Example #6
File: erp.py  Project: chubakur/probabilistic-python

def sample(erp, name=None, *params):
    """
    Generate sample from ERP
    :param erp:
    :type erp: ERP
    :param params:
    :return:
    """
    if mh.MCMC_shared.mh_flag:
        return mh.trace_update(erp, name, *params)
    return erp.sample(*params)


# def flip(p=0.5):
# return FixedERP(FlipERP(), [p])
#
#
# def uniform(low=0., high=1.):
#     return FixedERP(UniformERP(), [low, high])

# flip = partial(sample, FlipERP())
uniform = partial(sample, UniformERP())
gaussian = partial(sample, GaussianERP())

if __name__ == '__main__':
    samples = repeat(gaussian, 100000)
    plot.hist(samples, bins=30)
    plot.show()
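In contrast to a fire-and-forget test helper, the `repeat` used here has to return the collected samples so they can be histogrammed; a compatible sketch (an assumption) is:

def repeat(func, times):
    # Call `func` `times` times and collect the results, e.g. for plotting.
    return [func() for _ in range(times)]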
Code Example #7
    def _interpolate(self,
                     im,
                     points,
                     min_ref_grid,
                     max_ref_grid,
                     method="bilinear",
                     padding_mode="zeros",
                     padding_mode_value=0.):
        num_batch = tf.shape(im)[0]
        vol_shape_xyz = tf.cast(tf.concat(
            [tf.shape(im)[1:-1][1::-1],
             tf.shape(im)[1:-1][2:]], axis=0),
                                dtype=tf.float32)
        width = vol_shape_xyz[0]
        height = vol_shape_xyz[1]
        depth = vol_shape_xyz[2]
        width_i = tf.cast(width, dtype=tf.int32)
        height_i = tf.cast(height, dtype=tf.int32)
        depth_i = tf.cast(depth, dtype=tf.int32)
        channels = tf.shape(im)[-1]
        num_row_major = tf.cast(tf.math.cumprod(vol_shape_xyz), dtype=tf.int32)
        shape_output = tf.stack([num_batch, num_row_major[-1], 1])
        zero = tf.zeros([], dtype=tf.float32)
        zero_i = tf.zeros([], dtype=tf.int32)
        ibatch = utils.repeat(
            num_row_major[-1] * tf.range(num_batch, dtype=tf.int32),
            num_row_major[-1])

        # scale positions to [0, width/height - 1]
        coeff_x = (width - 1.) / (max_ref_grid[0] - min_ref_grid[0])
        coeff_y = (height - 1.) / (max_ref_grid[1] - min_ref_grid[1])
        coeff_z = (depth - 1.) / (max_ref_grid[2] - min_ref_grid[2])
        ix = (coeff_x * points[:, 0, :]) - (coeff_x * min_ref_grid[0])
        iy = (coeff_y * points[:, 1, :]) - (coeff_y * min_ref_grid[1])
        iz = (coeff_z * points[:, 2, :]) - (coeff_z * min_ref_grid[2])

        # zeros/value padding modes: flag positions outside of the reference grid
        cond = tf.math.logical_or(
            tf.math.equal(padding_mode, tf.constant("zeros", dtype=tf.string)),
            tf.math.equal(padding_mode, tf.constant("value", dtype=tf.string)))

        def evaluate_valid():
            return tf.expand_dims(
                tf.cast(tf.less_equal(ix, width - 1.)
                        & tf.greater_equal(ix, zero)
                        & tf.less_equal(iy, height - 1.)
                        & tf.greater_equal(iy, zero)
                        & tf.less_equal(iz, depth - 1.)
                        & tf.greater_equal(iz, zero),
                        dtype=tf.float32), -1)

        def default():
            return tf.ones([], dtype=tf.float32)

        valid = tf.cond(cond, evaluate_valid, default)

        # For bilinear interpolation, the area between each corner and the sample position gives the weight of that corner's pixel value
        def bilinear():
            output = tf.zeros(shape_output, dtype=tf.float32)

            # get north-west-top corner indexes based on the scaled positions
            ix_nwt = tf.clip_by_value(tf.floor(ix), zero, width - 1.)
            iy_nwt = tf.clip_by_value(tf.floor(iy), zero, height - 1.)
            iz_nwt = tf.clip_by_value(tf.floor(iz), zero, depth - 1.)
            ix_nwt_i = tf.cast(ix_nwt, dtype=tf.int32)
            iy_nwt_i = tf.cast(iy_nwt, dtype=tf.int32)
            iz_nwt_i = tf.cast(iz_nwt, dtype=tf.int32)

            # get all offsets used to build the 8 corners
            offset_corner = tf.constant(
                [[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [0., 1., 1.],
                 [1., 0., 0.], [1., 0., 1.], [1., 1., 0.], [1., 1., 1.]],
                dtype=tf.float32)
            offset_corner_i = tf.cast(offset_corner, dtype=tf.int32)

            for c in range(8):
                # getting all corner indexes from north-west-top corner
                ix_c = ix_nwt + offset_corner[-c - 1, 0]
                iy_c = iy_nwt + offset_corner[-c - 1, 1]
                iz_c = iz_nwt + offset_corner[-c - 1, 2]

                # area is computed using the opposite corner
                nc = tf.expand_dims(
                    tf.abs((ix - ix_c) * (iy - iy_c) * (iz - iz_c)), -1)

                # current corner position
                ix_c = ix_nwt_i + offset_corner_i[c, 0]
                iy_c = iy_nwt_i + offset_corner_i[c, 1]
                iz_c = iz_nwt_i + offset_corner_i[c, 2]

                # gather input image values at the corner indexes and accumulate the weighted pixel value
                idx_c = ibatch + tf.clip_by_value(ix_c, zero_i, width_i - 1) \
                        + num_row_major[0] * tf.clip_by_value(iy_c, zero_i, height_i - 1) \
                        + num_row_major[1] * tf.clip_by_value(iz_c, zero_i, depth_i - 1)
                Ic = tf.gather(tf.reshape(im, [-1, channels]), idx_c)

                output += nc * Ic
            return output

        # else if method is nearest neighbor, we get the nearest corner
        def nearest_neighbor():
            # get the nearest corner indexes by rounding the scaled positions
            ix_nn = tf.cast(tf.clip_by_value(tf.round(ix), zero, width - 1.),
                            dtype=tf.int32)
            iy_nn = tf.cast(tf.clip_by_value(tf.round(iy), zero, height - 1.),
                            dtype=tf.int32)
            iz_nn = tf.cast(tf.clip_by_value(tf.round(iz), zero, depth - 1.),
                            dtype=tf.int32)

            # gather input pixel values from nn corner indexes
            idx_nn = ibatch + ix_nn + num_row_major[0] * iy_nn + num_row_major[
                1] * iz_nn
            output = tf.gather(tf.reshape(im, [-1, channels]), idx_nn)
            return output

        cond_bilinear = tf.math.equal(method,
                                      tf.constant("bilinear", dtype=tf.string))
        cond_nn = tf.math.equal(method, tf.constant("nn", dtype=tf.string))
        output = tf.case([(cond_bilinear, bilinear),
                          (cond_nn, nearest_neighbor)],
                         exclusive=True)

        # padding mode
        cond_border = tf.math.equal(padding_mode,
                                    tf.constant("border", dtype=tf.string))
        cond_zero = tf.math.equal(padding_mode,
                                  tf.constant("zeros", dtype=tf.string))
        cond_value = tf.math.equal(padding_mode,
                                   tf.constant("value", dtype=tf.string))

        def border_padding_mode():
            return output

        def zero_padding_mode():
            return output * valid

        def value_padding_mode():
            return output * valid + padding_mode_value * (1. - valid)

        output = tf.case([(cond_border, border_padding_mode),
                          (cond_zero, zero_padding_mode),
                          (cond_value, value_padding_mode)],
                         exclusive=True)

        return output
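The gather indexes above use row-major flattening with x varying fastest, idx = x + W*y + W*H*z, plus a per-batch offset produced by `utils.repeat`. A quick NumPy check of that index arithmetic (illustration only):

import numpy as np

W, H, D = 4, 3, 2
vol = np.arange(W * H * D).reshape(D, H, W)    # stored as (z, y, x)
x, y, z = 1, 2, 1
assert vol[z, y, x] == x + W * y + W * H * z   # matches num_row_major = [W, W*H, W*H*D]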
Code Example #8
File: check-events.py  Project: it-events-ro/scripts
meetups = {k: v for k, v in meetups.items() if _inTheFuture(v['start_time'])}

meetup_data = {
  'events': {},
  'groups': {},
}

def _getEvents(meetup_id):
  data = utils.meetupApi(
    '%s/events?&page=%d&status=upcoming' % (meetup_id, NO_EVENTS_TO_FETCH)) #past,upcoming
  if not isinstance(data, list):
    print('!!! Unexpected shape of response for %s:\n%s' % (meetup_id, data))
    return
  meetup_data['events'][meetup_id] = data
utils.repeat(meetup_ids, 'Getting events for %s', _getEvents)


def _getGroupInfo(group_urlid):
  data = utils.meetupApi(group_urlid)
  meetup_data['groups'][group_urlid] = data
group_urlids = sorted(
  set(e['group']['urlname'] for events in meetup_data['events'].values() for e in events))
utils.repeat(group_urlids, 'Getting group info for %s', _getGroupInfo)


def diff(existing_data, data):
    if existing_data is None:
        return True

    result = False
Code Example #9
def model_fn(data, mode):
    """Produces a loss for the exemplar task.

    Args:
      data: Dict of inputs ("image" being the image)
      mode: model's mode: training, eval or prediction

    Returns:
      EstimatorSpec
    """

    # In this mode (called once at the end of training), we create the tf.Hub
    # module in order to export the model, and use that to do one last prediction.
    if mode == tf.estimator.ModeKeys.PREDICT:

        def model_building_fn(img, is_training):
            end_points = ss_utils.apply_model_semi(
                img,
                None,
                is_training,
                outputs={
                    'embeddings': FLAGS.triplet_embed_dim,
                    'classes': datasets.get_auxiliary_num_classes(),
                })
            return end_points, end_points['classes']

        return trainer.make_estimator(mode,
                                      predict_fn=model_building_fn,
                                      predict_input=data['image'])

    # In all other cases, we are in train/eval mode.
    images_unsup = data[0]['image']
    images_sup = data[1]['image']

    # There is one special case, typically in eval mode, when we don't want to use
    # multiple examples, but a single one. In that case, add the fake length-1
    # example dimension to the input so that everything still works.
    # i.e. turn BHWC into B1HWC
    if images_unsup.shape.ndims == 4:
        images_unsup = images_unsup[:, None, ...]
    if images_sup.shape.ndims == 4:
        images_sup = images_sup[:, None, ...]

    # Find out the number of examples that have been created per image, which
    # may be different for sup/unsup, and use that for creating the labels.
    ninstances_unsup, nexamples_unsup = images_unsup.shape[:2]
    ninstances_sup, nexamples_sup = images_sup.shape[:2]

    # Then, fold the examples into the batch.
    images_unsup = utils.into_batch_dim(images_unsup)
    images_sup = utils.into_batch_dim(images_sup)

    # If we're not doing exemplar on the unsupervised data, skip it!
    if not FLAGS.triplet_loss_unsup:
        images_unsup = None

    # Forward them both through the model. The scope is needed for tf.Hub export.
    with tf.variable_scope('module'):
        # Here, we pass both inputs to `apply_model_semi`, and so we now get
        # outputs corresponding to each in `end_points` as "classes_unsup" and
        # similar, which we will use below.
        end_points = ss_utils.apply_model_semi(
            images_unsup,
            images_sup,
            is_training=(mode == tf.estimator.ModeKeys.TRAIN),
            outputs={
                'embeddings': FLAGS.triplet_embed_dim,
                'classes': datasets.get_auxiliary_num_classes(),
            })

    # Labelled classification loss
    # =====

    # Compute the supervision loss for each example of the supervised branch.
    labels_class = utils.repeat(data[1]['label'], nexamples_sup)
    logits_class = end_points['classes_sup']
    losses_class = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels_class, logits=logits_class)
    loss_class = tf.reduce_mean(losses_class)

    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metrics = (
            lambda labels_class, logits_class, losses_class: {  # pylint: disable=g-long-lambda
                'classification/top1 accuracy':
                    utils.top_k_accuracy(1, labels_class, logits_class),
                'classification/top5 accuracy':
                    utils.top_k_accuracy(5, labels_class, logits_class),
                'classification/loss': tf.metrics.mean(losses_class),
            }, [labels_class, logits_class, losses_class])

        return trainer.make_estimator(mode, loss_class, eval_metrics)

    # Exemplar triplet loss
    # =====
    losses_ex = []

    def do_triplet(embeddings, nexamples, ninstances):
        """Applies the triplet loss to the given embeddings."""
        # Empirically, normalizing the embeddings is more robust.
        embeddings = tf.nn.l2_normalize(embeddings, axis=-1)

        # Generate the labels as [0 0 0 1 1 1 ...]
        labels = utils.repeat(tf.range(ninstances), nexamples)

        # Apply batch-hard loss with a soft-margin.
        losses_tri = batch_hard(embeddings,
                                labels,
                                margin=0.0,
                                soft=True,
                                sample_pos=False,
                                sample_neg=False)
        return tf.reduce_mean(losses_tri)

    # Compute exemplar triplet loss on the unsupervised images
    if FLAGS.triplet_loss_unsup:
        loss_ex_unsup = do_triplet(end_points['embeddings_unsup'],
                                   ninstances_unsup, nexamples_unsup)
        losses_ex.append(tf.reduce_mean(loss_ex_unsup))

    # Compute exemplar triplet loss on the supervised images.
    if FLAGS.triplet_loss_sup:
        loss_ex_sup = do_triplet(end_points['embeddings_sup'], ninstances_sup,
                                 nexamples_sup)
        losses_ex.append(tf.reduce_mean(loss_ex_sup))

    loss_ex = tf.reduce_mean(losses_ex) if losses_ex else 0.0

    # Combine the two losses as a weighted average.
    loss = loss_class + FLAGS.triplet_loss_weight * loss_ex

    return trainer.make_estimator(mode, loss)
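`utils.into_batch_dim` is assumed to fold the per-image example dimension into the batch, (B, N, H, W, C) -> (B*N, H, W, C), which is why `utils.repeat(labels, nexamples)` then yields exactly one label per folded example. A NumPy sketch of that reshape (an assumption, not the project's code):

import numpy as np

x = np.zeros((2, 3, 8, 8, 1))               # (B, N, H, W, C)
folded = x.reshape((-1,) + x.shape[2:])     # (B*N, H, W, C)
assert folded.shape == (6, 8, 8, 1)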
Code Example #10
File: networkProfile.py  Project: rosmod/rosmod
    def Repeat(self, num_periods):
        """Copy the current profile entries over some number of its periods."""
        keys = ['slope', 'max slope', 'latency']
        for key in keys:
            if key in self.entries:
                self.entries[key] = utils.repeat(self.entries[key], self.period, num_periods)
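The docstring only says that entries are copied over a number of periods; a hedged sketch of what `utils.repeat(values, period, num_periods)` might do with timestamped profile entries (purely an assumption, the rosmod implementation may differ):

def repeat(entries, period, num_periods):
    # Tile [time, value] entries over `num_periods` periods, shifting each
    # copy's timestamps by one period length.
    tiled = []
    for p in range(num_periods):
        tiled.extend([[t + p * period, v] for t, v in entries])
    return tiled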
Code Example #11
    def __init__(self, sampler, FLAGS, reuse=False):
        self.N, self.K = sampler.N, sampler.K
        self.FLAGS = FLAGS
        self._sampler = sampler
        max_neisize = FLAGS.max_neisize
        H = FLAGS.embedding_dim
        L = FLAGS.layers

        with tf.variable_scope('model', reuse=reuse):

            with tf.name_scope("placeholder"):
                self.inputs = tf.placeholder(tf.int32, (None, 2))
                self.labels = tf.placeholder(tf.float32, (None, ))

            with tf.name_scope("embedding"):
                emb_node = tf.get_variable("emb_node", (self.N, H))
                emb_type = tf.get_variable("emb_type", (self.K, H))
                self.bias_table = tf.get_variable(
                    'bias_table', (self.N, ), initializer=tf.zeros_initializer)
                self.global_bias = tf.get_variable(
                    'global_bias', (), initializer=tf.zeros_initializer)
                node_type_weight = [
                    tf.get_variable("node_type_weight_" + str(i), shape=(H, H))
                    for i in range(L)
                ]
                node_type_att = [None for i in range(L)]

            with tf.name_scope("networks"):
                nodes = [None for i in range(L)]
                cands = [None for i in range(L)]
                nodes.append(tf.reshape(self.inputs, [-1]))

                for i in range(L - 1, -1, -1):
                    node_type_att[i] = tf.exp(
                        tf.matmul(
                            tf.gather(emb_node, nodes[i + 1]),
                            tf.matmul(node_type_weight[i],
                                      emb_type,
                                      transpose_b=True)))
                    node_type_att[i] *= tf.gather(sampler.node_type_mask,
                                                  nodes[i + 1])
                    node_type_att[i] /= tf.reduce_sum(node_type_att[i],
                                                      axis=1,
                                                      keepdims=True)
                    cands[i] = self.draw(
                        nodes[i + 1], sampler,
                        tf.cast(tf.ceil(node_type_att[i] * max_neisize),
                                tf.int32))
                    nodes[i] = tf.concat((nodes[i + 1], cands[i].values),
                                         axis=-1)

                regularizer = tf.contrib.layers.l2_regularizer(
                    scale=FLAGS.l2_reg)
                hiddens = [tf.gather(emb_node, nodes[0])]

                for i in range(L):
                    with tf.variable_scope("l_%d" % i):
                        cur_hidden = tf.layers.dense(
                            hiddens[i][:tf.size(nodes[i + 1])],
                            H,
                            kernel_regularizer=regularizer,
                            name='cur')
                        nei_hidden = tf.layers.dense(
                            hiddens[i][tf.size(nodes[i + 1]):],
                            H,
                            kernel_regularizer=regularizer,
                            name='nei')
                        segment_ids = repeat(tf.range(tf.size(cands[i].lens)),
                                             flatten(cands[i].lens))
                        nei_hidden_agg = tf.segment_mean(
                            nei_hidden, segment_ids)
                        nei_hidden_agg = tf.reshape(
                            tf.concat([
                                nei_hidden_agg,
                                tf.zeros((tf.size(cands[i].lens) -
                                          tf.shape(nei_hidden_agg)[0], H))
                            ],
                                      axis=0), [-1, self.K, H])
                        nei_hidden_agg_att = tf.reduce_sum(
                            nei_hidden_agg *
                            tf.expand_dims(node_type_att[i], 2),
                            axis=1)
                        next_hidden = tf.layers.dense(
                            tf.concat([cur_hidden, nei_hidden_agg_att],
                                      axis=1),
                            H,
                            kernel_regularizer=regularizer,
                            name='nxt')
                        hiddens.append(next_hidden)
                output = tf.reshape(hiddens[L], [-1, 2, H])
                bias = tf.gather(self.bias_table, self.inputs)
                self.output = tf.reduce_sum(
                    tf.multiply(output[:, 0, :], output[:, 1, :]),
                    axis=-1) + bias[:, 0] + bias[:, 1] + self.global_bias

            if reuse: return

            self.saver = tf.train.Saver()

            with tf.name_scope('loss'):
                l2_loss = tf.losses.get_regularization_loss()
                self.cost = tf.losses.mean_squared_error(
                    self.labels, self.output) + l2_loss

            with tf.name_scope("optimizer"):
                self.global_step = tf.Variable(0,
                                               name="global_step",
                                               trainable=False)
                if FLAGS.optimizer == 'adam':
                    optimizer = tf.train.AdamOptimizer(FLAGS.lrate)
                elif FLAGS.optimizer == 'ftrl':
                    optimizer = tf.train.FtrlOptimizer(FLAGS.lrate)
                else:
                    optimizer = tf.train.GradientDescentOptimizer(FLAGS.lrate)
                grads_and_vars = optimizer.compute_gradients(self.cost)
                self.train_op = optimizer.apply_gradients(
                    grads_and_vars, global_step=self.global_step)
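Here `repeat(values, counts)` takes a per-element count, like `np.repeat` with an array argument, and turns the neighbour counts into segment ids for `tf.segment_mean`. A small NumPy illustration (for reference only):

import numpy as np

lens = np.array([2, 0, 3])                    # neighbours drawn per node
seg = np.repeat(np.arange(lens.size), lens)   # -> [0 0 2 2 2]
# segment_mean with these ids averages each node's neighbour embeddings.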
Code Example #12
    def _advance_to(self, month, year):
        curr_month = self.get_month()
        curr_year = self.get_year()
        num_iter = 12 * (year - curr_year) + (month - curr_month)
        utils.repeat(self._navigate_to_next_month, num_iter)
Code Example #13
    def test_get_rand_empty_tile_returns_in_bounds_tile(self):
        def bounds_check():
            self.assertTrue(
                self.dmap.in_bounds(self.dmap.get_random_empty_tile()))

        repeat(bounds_check, self.RAND_RUNS)
Code Example #14
def _getFromNestedDict(r, *keys):  # signature inferred from the call below
  for k in keys:
    r = r.get(k, None)
    if r is None: return None
  return r


fb = dict(events=[], orgs={})
def _getOrgAndEvents(like):
  fb_id = like['id']

  events = utils.facebookApi(
    '%s/events?fields=description,name,start_time,end_time,ticket_uri,place,id&'
    'since=2010-11-01T00:00:00' % fb_id, paginate=True)
  events = [
    e for e in events
    if _getFromNestedDict(e, 'place', 'location', 'country') == 'Romania'
  ]
  if not events: return

  for event in events:
    event['org_id'] = fb_id
  fb['events'] += events

  fb['orgs'][fb_id] = utils.facebookApi(
    '%s?fields=name,birthday,cover,description' % fb_id, paginate=False) 
utils.repeat(likes, 'Getting info for %s', _getOrgAndEvents)

with open('facebooks.json', 'w') as f:
  f.write(json.dumps(fb, sort_keys=True, indent=4))