Exemple #1
0
    def create_hierarchy_(self, schema, data):
        """
        Serialize *data* into a proxy filesystem hierarchy according to
        *schema* and the hint registered for it.

        Returns a ProxyFile or a directory proxy built by the
        write_* helpers. Raises ValueError when no (schema, hint)
        combination is implemented.
        """
        hint = self.get_hint(schema)

        # A YAML hint wins over everything else: the whole subtree
        # becomes a single YAML file.
        if isinstance(hint, HintFileYAML):
            return ProxyFile(yaml_dump(data))

        if isinstance(schema, SchemaHash):
            if isinstance(hint, HintDir):
                return write_SchemaHash_SER_DIR(self, schema, data)
            if isinstance(hint, HintExtensions):
                return write_SchemaHash_Extensions(self, schema, data)

        if isinstance(schema, SchemaList) and isinstance(hint, HintDir):
            return write_SchemaList_SER_DIR(self, schema, data)

        if isinstance(schema, SchemaContext) and isinstance(hint, HintDir):
            return write_SchemaContext_SER_DIR(self, schema, data)

        # Leaf values stored as individual files.
        if isinstance(hint, HintFile):
            if isinstance(schema, SchemaBytes):
                return ProxyFile(data)
            if isinstance(schema, SchemaString):
                return ProxyFile(schema.encode(data))
            if isinstance(schema, SchemaDate):
                return ProxyFile(yaml_dump(data))

        msg = 'Not implemented for %s, hint %s' % (schema, hint)
        raise ValueError(msg)
Exemple #2
0
def check_translation_diskrep_to_gitrep(disk_rep0, disk_events, disk_rep1, out):  # @UnusedVariable
    """
    Replay *disk_events* onto a git repository created from *disk_rep0*,
    committing after each event.

    Returns a dict with key 'repo' (the git repository).
    Raises ValueError if there are no events to replay.
    """
    if not disk_events:
        raise ValueError('no disk events')

    repo = gitrep_from_diskrep(disk_rep0)
    wd = repo.working_tree_dir

    # Sanity check: reading the repo back must reproduce the input.
    readback = diskrep_from_gitrep(repo)
    assert_diskreps_same(disk_rep0, readback, 'original', 'written back')

    logger.debug(wd)
    logger.debug('\n' + indent(readback.tree(), 'read back |'))
    logger.debug('\n' + indent(yaml_dump(disk_events), 'disk_events|'))

    commits = []
    for ev in disk_events:
        logger.debug(indent(yaml_dump(ev), 'disk_event | '))
        apply_disk_event_to_filesystem(wd, ev, repo=repo)

        # New files must be staged explicitly before committing.
        if repo.untracked_files:
            logger.debug('adding untracked file %r' % repo.untracked_files)
            repo.index.add(repo.untracked_files)

        # The commit message is the YAML dump of the event itself.
        message = yaml_dump(ev)
        who = ev['who']
        logger.info('who: %s' % who)
        author = Actor(who['actor'], who.get('instance', None))
        committer = Actor(who.get('instance', None), who.get('host', None))
        commits.append(repo.index.commit(message,
                                         author=author,
                                         committer=committer))

    return {'repo': repo}
Exemple #3
0
 def __call__(self, data_event):
     """
     Handle one data event: record it, translate it to disk events,
     apply those to the repo working dir, and commit the result.
     """
     from mcdp_hdb.disk_map_disk_events_from_data_events import disk_events_from_data_event
     from mcdp_hdb.disk_events import apply_disk_event_to_filesystem
     s = yaml_dump(data_event)
     logger.debug('Event #%d:\n%s' % (len(self.data_events), indent(s, '> ')) )
     self.data_events.append(data_event)
     # Translate the in-memory data event into equivalent disk events.
     disk_events = disk_events_from_data_event(disk_map=self.disk_map, 
                                              schema=self.view._schema, 
                                              data_rep=self.view._data, 
                                              data_event=data_event)
     
     for disk_event in disk_events:
         logger.debug('Disk event:\n%s' % yaml_dump(disk_event))
         wd = self.repo.working_dir
         apply_disk_event_to_filesystem(wd, disk_event, repo=self.repo)
         
     # Commit with the YAML dump of the data event as the commit message.
     message = yaml_dump(data_event)
     who = data_event['who']
     if who is not None:
         actor = who['actor']
         host = who['host']
         instance = who['instance']
     else:
         # No author information: attribute the change to the system.
         actor = 'system'
         host = host_name()
         instance = 'unspecified'
         
     author = Actor(actor, '%s@%s' % (actor, instance))
     committer = Actor(instance, '%s@%s' % (instance, host))
     _commit = self.repo.index.commit(message, author=author, committer=committer)
     
     
Exemple #4
0
def replay_events(view_manager, db0, events):
    """
    Replay *events* on a deep copy of *db0* and return the resulting db.

    The original *db0* is left untouched.
    """
    current = deepcopy(db0)
    for event in events:
        event_intepret(view_manager, current, event)
        msg = ('\nAfter playing event:\n'
               + indent(yaml_dump(event), '   event: ')
               + '\nthe DB is:\n'
               + indent(yaml_dump(current), '   db: '))
        logger.debug(msg)
    return current
Exemple #5
0
def assert_data_equal(schema, data1, data2):  # @UnusedVariable
    ''' 
        Checks that two datas are the same, by checking the hashcode. 
        Raises ValueError.
    '''
    from .schema import data_hash_code

    # Equal hash codes: nothing to report.
    if data_hash_code(data1) == data_hash_code(data2):
        return

    pieces = ['The two datas are different',
              indent(yaml_dump(data1), ' data1 '),
              indent(yaml_dump(data2), ' data2 ')]
    raise ValueError('\n'.join(pieces))
Exemple #6
0
    def create_view_instance(self, s, data):
        """
        Create a view object for *data* interpreted with schema *s*.

        First validates the data against the schema (wrapping NotValid
        if it does not conform), then dispatches on the schema type to
        the appropriate View class. Raises NotImplementedError for
        unknown schema types.
        """
        try:
            s.validate(data)
        except NotValid as e:
            msg = 'Could not create view instance because the data is not valid '
            msg += 'according to the schema.'
            msg += '\n' + indent(s.__str__(), 'schema: ')
            msg += '\n' + indent(yaml_dump(data), 'data: ')
            raise_wrapped(NotValid, e, msg, compact=True)
        # If a custom base class was registered for this schema,
        # mix it into the view class we create below.
        if s in self.s2baseclass:
            use = self.s2baseclass[s]

            class Base(use):
                pass
        else:

            class Base(object):
                pass

        if isinstance(s, SchemaContext):

            class ViewContext(ViewContext0, Base):
                pass

            try:
                view = ViewContext(view_manager=self, data=data, schema=s)
                view.init_context()
                return view
            except TypeError as e:
                # A custom Base with a non-default constructor breaks
                # the ViewContext instantiation above.
                msg = 'Probably due to a constructor in Base = %s' % (Base)
                if s in self.s2baseclass:
                    msg += '\n' + str(self.s2baseclass[s])
                raise_wrapped(ValueError, e, msg)

        if isinstance(s, SchemaHash):

            class ViewHash(ViewHash0, Base):
                pass

            view = ViewHash(view_manager=self, data=data, schema=s)
            view.init_hash()
            return view

        if isinstance(s, SchemaList):

            class ViewList(ViewList0, Base):
                pass

            return ViewList(view_manager=self, data=data, schema=s)

        # Leaf schemas get the plain view classes (no custom base).
        if isinstance(s, SchemaString):
            return ViewString(view_manager=self, data=data, schema=s)

        if isinstance(s, SchemaBytes):
            return ViewBytes(view_manager=self, data=data, schema=s)

        if isinstance(s, SchemaDate):
            return ViewDate(view_manager=self, data=data, schema=s)

        raise NotImplementedError(type(s))
Exemple #7
0
 def hash_code(self):
     """
     Return a hash for this directory: the md5 of the YAML dump of the
     sorted (name, hash) pairs of all files, then all subdirectories.
     """
     entries = [[name, self._files[name].hash_code()]
                for name in sorted(self._files)]
     entries += [[name, self._directories[name].hash_code()]
                 for name in sorted(self._directories)]
     return get_md5(yaml_dump(entries))
Exemple #8
0
def event_interpret_(view, event):
    """
    Apply a single data *event* to *view*, dispatching on the event's
    'operation' field; any failure is wrapped in InvalidOperation.
    """
    handlers = {
        DataEvents.leaf_set: event_leaf_set_interpret,
        DataEvents.struct_set: event_struct_set_interpret,
        DataEvents.list_set: event_list_set_interpret,
        DataEvents.hash_set: event_hash_set_interpret,
        DataEvents.increment: event_increment_interpret,
        DataEvents.list_append: event_list_append_interpret,
        DataEvents.list_remove: event_list_remove_interpret,
        DataEvents.list_delete: event_list_delete_interpret,
        DataEvents.list_insert: event_list_insert_interpret,
        DataEvents.list_setitem: event_list_setitem_interpret,
        DataEvents.set_add: event_set_add_interpret,
        DataEvents.set_remove: event_set_remove_interpret,
        DataEvents.dict_setitem: event_dict_setitem_interpret,
        DataEvents.dict_delitem: event_dict_delitem_interpret,
        DataEvents.dict_rename: event_dict_rename_interpret,
    }
    handler = handlers[event['operation']]
    try:
        handler(view=view, **event['arguments'])
    except Exception as e:
        msg = 'Could not complete the replay of this event: \n'
        msg += indent(yaml_dump(event), 'event: ')
        raise_wrapped(InvalidOperation, e, msg)
Exemple #9
0
def disk_event_interpret(disk_rep, disk_event):
    """
    Apply a single *disk_event* to the in-memory disk representation
    *disk_rep*, dispatching on the event's 'operation' field.

    Raises NotImplementedError for an unknown operation; any failure
    during interpretation is wrapped in InvalidOperation.
    """
    fs = {
        DiskEvents.disk_event_group: disk_event_disk_event_group_interpret,
        DiskEvents.dir_create: disk_event_dir_create_interpret,
        DiskEvents.dir_rename: disk_event_dir_rename_interpret,
        DiskEvents.dir_delete: disk_event_dir_delete_interpret,
        DiskEvents.file_create: disk_event_file_create_interpret,
        DiskEvents.file_modify: disk_event_file_modify_interpret,
        DiskEvents.file_delete: disk_event_file_delete_interpret,
        DiskEvents.file_rename: disk_event_file_rename_interpret,
    }
    ename = disk_event['operation']
    # idiomatic membership test (was: `if not ename in fs`)
    if ename not in fs:
        raise NotImplementedError(ename)
    intf = fs[ename]
    arguments = disk_event['arguments']
    try:
        logger.info('%s %s' % (ename, arguments))
        intf(disk_rep=disk_rep, **arguments)
    except Exception as e:
        msg = 'Could not complete the replay of this event: \n'
        msg += indent(yaml_dump(disk_event), 'disk_event: ')
        msg += '\nFor this tree:\n'
        msg += indent((disk_rep.tree()), ' disk_rep: ')
        # local import to avoid a circular dependency
        from mcdp_hdb.memdataview import InvalidOperation
        raise_wrapped(InvalidOperation, e, msg)
def disk_events_from_data_event(disk_map, schema, data_rep, data_event):
    """
    Translate one data event into the equivalent list of disk events,
    given the *disk_map* and the *schema* that describes *data_rep*.

    If the change happened inside a file stored as YAML, a single
    file_modify event is produced; otherwise the operation is
    dispatched to the matching disk_events_from_* handler.
    Raises NotImplementedError for unsupported operations.
    """
    viewmanager = ViewManager(schema)
    view = viewmanager.create_view_instance(schema, data_rep)
    view._schema.validate(data_rep)

    # As a preliminary check, we check whether this change happened
    # inside a YAML representation.
    inside_yaml, name_of_yaml = change_was_inside_YAML(view=view,
                                                       data_event=data_event,
                                                       disk_map=disk_map)
    # If yes, then the result will be a file_modify event for the YAML file.
    if inside_yaml:
        return disk_events_from_data_event_inside_yaml(disk_map,
                                                       data_event,
                                                       view,
                                                       p=name_of_yaml)

    handlers = {
        DataEvents.leaf_set: disk_events_from_leaf_set,
        DataEvents.hash_set: disk_events_from_hash_set,
        DataEvents.dict_setitem: disk_events_from_dict_setitem,
        DataEvents.dict_delitem: disk_events_from_dict_delitem,
        DataEvents.dict_rename: disk_events_from_dict_rename,
        DataEvents.list_append: disk_events_from_list_append,
        DataEvents.list_delete: disk_events_from_list_delete,
        DataEvents.list_insert: disk_events_from_list_insert,
        DataEvents.list_remove: disk_events_from_list_remove,
    }
    operation = data_event['operation']
    if operation not in handlers:
        raise NotImplementedError(operation)

    f = handlers[operation]
    arguments = data_event['arguments']
    who = data_event['who']
    try:
        evs = f(disk_map=disk_map,
                view=view,
                _id='tmp-id',
                who=who,
                **arguments)
        # Rewrite the placeholder ids based on the original event's id.
        _id = data_event['id']
        for i, ev in enumerate(evs):
            ev['id'] = _id + '-translated-%d' % i
        return evs
    except Exception as e:
        # BUG FIX: 'succesfully' typo corrected, and indent() expects a
        # string, so pass str(schema) rather than the schema object.
        msg = 'Could not successfully translate using %r:' % f.__name__
        msg += '\n' + 'Schema: ' + '\n' + indent(str(schema), ' schema ')
        msg += '\n' + 'Data event: ' + '\n' + indent(
            yaml_dump(data_event), ' data_event ')
        raise_wrapped(Exception, e, msg)
Exemple #11
0
def raise_incorrect_format(msg, schema, data):
    """
    Raise IncorrectFormat with *msg*, mentioning the schema.

    A truncated YAML dump of *data* is computed, but its inclusion in
    the message is currently disabled (see the `if False` block below).
    """
    msg2 = 'Incorrect format:\n'
    msg2 += indent(msg, '  ')
    if isinstance(data, str):
        datas = data
    else:
        datas = yaml_dump(data).encode('utf8')
    MAX = 512
    if len(datas) > MAX:
        datas = datas[:MAX] + ' [truncated]'
    # NOTE(review): deliberately disabled — the truncated data dump
    # above is computed but never added to the message.
    if False:
        msg2 += '\nData:\n'
        msg2 += indent(datas, '  ')
    #     msg2 += 'repr: '+ datas.__repr__()
    raise_desc(IncorrectFormat, msg2, schema=str(schema))
Exemple #12
0
def data_hash_code(s):
    """
    Compute a recursive hash code for *s* (None, str, datetime, list,
    or dict); raises ValueError for any other type.
    """
    if s is None:
        return 'None'
    if isinstance(s, str):
        return get_md5(s)
    if isinstance(s, datetime.datetime):
        # Hash the YAML serialization of the timestamp.
        return get_md5(yaml_dump(s))
    if isinstance(s, list):
        return get_md5("-".join(map(data_hash_code, s)))
    if isinstance(s, dict):
        # Hash key/value pairs in sorted-key order for determinism.
        codes = ['%s-%s' % (k, data_hash_code(s[k])) for k in sorted(s)]
        return data_hash_code("_".join(codes))
    msg = 'Invalid type %s' % describe_type(s)
    raise ValueError(msg)
Exemple #13
0
def read_as_user_db(dirname):
    """
    Load the user database stored under *dirname* and return a root
    view for it.
    """
    dm = DB.dm

    # Read the raw file hierarchy from disk.
    found = ProxyDirectory.from_disk(dirname)
    logger.info('These are the files found:\n%s' %
                indent(found.tree(), '  '))

    schema = DB.user_db
    data = dm.interpret_hierarchy_(schema, found)

    logger.debug('user_db schema: \n' + str(schema))
    logger.debug('user_db:\n' + indent(yaml_dump(data), ' > '))

    DB.user_db.validate(data)

    # Wrap the data in a view rooted at the top of the database.
    view = DB.view_manager.create_view_instance(schema, data)
    view.set_root()
    return view
Exemple #14
0
def test_extension():
    """
    Round-trip check for the 'extensions' hint: a hash of images stored
    as files named by extension must survive interpret_/create_.
    """
    exts = sorted({e.lower() for e in MCDPConstants.exts_images})

    s = Schema()
    image = Schema()
    for ext in exts:
        image.bytes(ext, can_be_none=True)  # and can be none
    s.hash('images', image)

    l('schema', s)

    dm = DiskMap()
    dm.hint_extensions(s['images'], exts)

    # Build the initial hierarchy: three image files, same contents.
    d = 'contents'
    names = ['im1.jpg', 'im2.png', 'im2.jpg']
    images_dir = ProxyDirectory(files={name: ProxyFile(d) for name in names})
    h0 = ProxyDirectory(directories={'images': images_dir})

    data = dm.interpret_hierarchy_(s, h0)
    l('data', yaml_dump(data))
    s.validate(data)

    # Serialize again and compare hash codes with the original.
    h1 = dm.create_hierarchy_(s, data)
    l('h1', h1.tree())

    if h0.hash_code() != h1.hash_code():
        msg = 'They do not match'
        msg += '\n' + indent(h0.tree(), 'h0 ')
        msg += '\n' + indent(h1.tree(), 'h1 ')
        raise Exception(msg)
def check_translation_memdata_to_diskrep(schema, data_rep0, data_events,
                                         data_rep1, disk_map, out):
    """
    Check that replaying *data_events* on *data_rep0* while translating
    each one to disk events keeps the incrementally-updated disk
    representation identical to the one obtained by serializing the
    data directly.

    Writes intermediate artifacts to the directory *out* (recreated).
    Returns dict(disk_rep0=..., disk_events=..., disk_rep=...).
    """
    view_manager = ViewManager(schema)

    # first, make sure that the data is coherent
    assert_data_events_consistent(schema, data_rep0, data_events, data_rep1)

    disk_events = []

    data_rep = deepcopy(data_rep0)
    disk_rep = disk_map.create_hierarchy_(schema, data_rep0)
    disk_rep0 = deepcopy(disk_rep)

    # start from a clean output directory
    if os.path.exists(out):
        shutil.rmtree(out)
    if not os.path.exists(out):
        os.makedirs(out)

    def write_file_(name, what):
        # write `what` to out/name, for debugging
        name = os.path.join(out, name)
        with open(name, 'w') as f:
            f.write(what)
        logger.info('wrote on %s' % name)

    def write_file(i, n, what):
        # step-numbered debug file
        name = '%d-%s.txt' % (i, n)
        write_file_(name, what)

    write_file_('0-aa-data_events.yaml', yaml_dump(data_events))

    for i, data_event in enumerate(data_events):
        write_file(i, 'a-disk_rep', disk_rep.tree())
        write_file(i, 'b-data_rep', yaml_dump(data_rep))
        write_file(i, 'c-data_event', yaml_dump(data_event))

        # translate the data event to disk events
        evs = disk_events_from_data_event(disk_map, schema, data_rep,
                                          data_event)
        if not evs:
            msg = 'The event resulted in 0 disk events.'
            msg += '\n' + indent(yaml_dump(data_event), ' data_event ')
            raise Exception(msg)

        write_file(i, 'd-evs', yaml_dump(evs))

        disk_events.extend(evs)

        # interpret data event
        event_intepret(view_manager, data_rep, data_event)
        write_file(i, 'e-data_rep-modified', yaml_dump(data_rep))

        # route 1: serialize the updated data directly
        disk_rep_by_translation = disk_map.create_hierarchy_(schema, data_rep)
        write_file(i, 'f-data_rep-modified-translated-to-disk_rep',
                   disk_rep_by_translation.tree())

        # route 2: apply the translated disk events incrementally
        for disk_event in evs:
            disk_event_interpret(disk_rep, disk_event)

        write_file(i, 'g-disk_rep-with-evs-applied', disk_rep.tree())

        msg = 'Data event:\n' + indent(yaml_dump(data_event), ' data_event ')
        msg += '\nDisk events:\n' + indent(yaml_dump(evs), ' events ')
        logger.debug(msg)

        # both routes must agree
        assert_equal_disk_rep(disk_rep_by_translation, disk_rep)

    logger.info('test ok, written on %s' % out)
    return dict(disk_rep0=disk_rep0,
                disk_events=disk_events,
                disk_rep=disk_rep)
Exemple #16
0
def apply_disk_event_to_filesystem(wd, disk_event, repo=None):
    '''
        Applies the disk event to the filesystem rooted at `wd`.

        If repo is not None, it applies the changes to the git index as
        well (adds/removes/moves are staged so the next commit reflects
        the event).
    '''
    if repo:
        repo_index = repo.index
    else:
        repo_index = None

    def path_relative_to_repo(fn):
        # absolute filename -> path relative to the repo working dir
        repo_dir = repo.working_dir
        x = os.path.relpath(fn, repo_dir)
        #         logger.debug('%s %s - > %s ' % (repo_dir, fn, x))
        return x

    def descendants_tracked(dirname):
        ''' Yields absolute, relative (to dirname) '''
        def tracked(absolute_fn):  # @UnusedVariable
            return True  # XXX

        for root, _, files in os.walk(dirname, followlinks=False):
            for f in files:
                absolute = os.path.join(root, f)
                if tracked(absolute):
                    yield absolute, f

    def as_path(dirname, sub):
        # (list of path components, name) -> absolute path under wd
        path = os.path.join(wd, '/'.join(dirname))
        res = os.path.join(path, sub)
        return res

    def dir_create(dirname, name):
        p = as_path(dirname, name)
        os.makedirs(p)

    def dir_rename(dirname, name, name2):
        p1 = as_path(dirname, name)
        p2 = as_path(dirname, name2)
        os.rename(p1, p2)
        if repo_index:
            # stage the move of every tracked file under the old dir
            for _, rel in descendants_tracked(p1):
                #                 fn1 = path_relative_to_repo(os.path.join(p1, rel))
                #                 fn2 = path_relative_to_repo(os.path.join(p2, rel))
                f1 = os.path.join(p1, rel)
                f2 = os.path.join(p2, rel)
                repo_index.move([f1, f2])

    def file_rename(dirname, name, name2):
        p1 = as_path(dirname, name)
        p2 = as_path(dirname, name2)
        # index.move also renames on disk, so only one branch runs
        if repo_index:
            #             fn1 = path_relative_to_repo(p1)
            #             fn2 = path_relative_to_repo(p2)
            #             logger.debug('fn1: %s' % fn1)
            #             logger.debug('fn2: %s' % fn2)
            #             # repo_index.move(fn1, fn2)
            #             logger.debug('working dir: %s' % repo.working_dir)
            #             logger.debug('p1: %s' % p1)
            #             logger.debug('p2: %s' % p2)
            repo_index.move([p1, p2])
        else:
            os.rename(p1, p2)

    def dir_delete(dirname, name):
        p = as_path(dirname, name)
        shutil.rmtree(p)
        if repo_index:
            for absolute, _rel in descendants_tracked(p):
                fn = path_relative_to_repo(absolute)
                repo_index.remove([fn])

    def file_create(dirname, name, contents):
        p = as_path(dirname, name)
        with open(p, 'wb') as f:
            f.write(contents)
        if repo_index:
            fn = path_relative_to_repo(p)
            repo_index.add([fn])

    def file_modify(dirname, name, contents):
        # same as file_create: overwrite and (re-)stage
        p = as_path(dirname, name)
        with open(p, 'wb') as f:
            f.write(contents)
        if repo_index:
            fn = path_relative_to_repo(p)
            repo_index.add([fn])

    def file_delete(dirname, name):
        p = as_path(dirname, name)
        os.unlink(p)
        if repo_index:
            fn = path_relative_to_repo(p)
            repo_index.remove([fn])

    def disk_event_group(events):
        # recursively apply each event in the group
        for event in events:
            apply_disk_event_to_filesystem(wd, event, repo=repo)

    # dispatch table: operation name -> handler above
    fs = {
        DiskEvents.disk_event_group: disk_event_group,
        DiskEvents.dir_create: dir_create,
        DiskEvents.dir_rename: dir_rename,
        DiskEvents.dir_delete: dir_delete,
        DiskEvents.file_create: file_create,
        DiskEvents.file_modify: file_modify,
        DiskEvents.file_delete: file_delete,
        DiskEvents.file_rename: file_rename,
    }
    ename = disk_event['operation']
    if not ename in fs:
        raise NotImplementedError(ename)
    intf = fs[ename]
    arguments = disk_event['arguments']
    try:
        #         logger.info('Arguments: %s' % arguments)
        intf(**arguments)
    except Exception as e:
        msg = 'Could not apply this event to filesystem: \n'
        msg += indent(yaml_dump(disk_event), 'disk_event: ')
        msg += '\nwd: %s' % wd
        # local import to avoid a circular dependency
        from mcdp_hdb.memdataview import InvalidOperation
        raise_wrapped(InvalidOperation, e, msg)
Exemple #17
0
 def notify_callback(event):
     # Record every change notification; `events` is closed over from
     # the enclosing scope.
     logger.debug('\n' + yaml_dump(event))
     events.append(event)
def check_translation_diskrep_to_memdata(schema, disk_rep0, disk_events,
                                         disk_rep1, disk_map, out):
    """
    Check that consuming *disk_events* (starting from *disk_rep0*) and
    translating them into data events keeps the in-memory data in sync
    with the data obtained by re-interpreting the disk representation.

    Writes intermediate artifacts to the directory *out* (recreated).
    Returns dict(data_rep0=..., data_events=..., data_rep=...).
    """
    disk_events = deepcopy(disk_events)
    assert_disk_events_consistent(disk_rep0, disk_events, disk_rep1)

    view_manager = ViewManager(schema)
    # start from a clean output directory
    if os.path.exists(out):
        shutil.rmtree(out)
    if not os.path.exists(out):
        os.makedirs(out)

    def write_file_(name, what):
        # write `what` to out/name, for debugging
        name = os.path.join(out, name)
        with open(name, 'w') as f:
            f.write(what)
        logger.info('wrote on %s' % name)

    def write_file(i, n, what):
        name = '%d-%s.txt' % (i, n)
        write_file_(name, what)

    write_file_('0-aa-disk_events.yaml', yaml_dump(disk_events))

    # first translate the data
    disk_rep = deepcopy(disk_rep0)
    data_rep = disk_map.interpret_hierarchy_(schema, disk_rep)
    data_rep0 = deepcopy(data_rep)
    data_events = []
    i = 0
    while disk_events:
        write_file(i, 'a-disk_rep', disk_rep.tree())
        write_file(i, 'b-data_rep', yaml_dump(data_rep))
        write_file(i, 'c-disk_event', yaml_dump(disk_events[0]))

        disk_events0 = deepcopy(disk_events)
        # consumes one or more events from the front of the queue
        evs, disk_events_consumed = data_events_from_disk_event_queue(
            disk_map, schema, disk_rep, disk_events)
        logger.debug('This consumed %d events' % len(disk_events_consumed))
        logger.debug('This disk_event become this data_event:' + '\n' +
                     indent(yaml_dump(disk_events0), 'disk_events : ') + '\n' +
                     indent(yaml_dump(evs), 'data_events : '))

        msg = 'Disk event:\n' + indent(yaml_dump(disk_events_consumed),
                                       ' disk_events_consumed ')
        msg += '\nData events:\n' + indent(yaml_dump(evs), ' events ')
        logger.debug(msg)

        # tmp - interpret now
        data0 = deepcopy(data_rep)
        for data_event in evs:
            event_intepret(view_manager, data0, data_event)
        # tmp
        if not evs:
            msg = 'The disk event resulted in 0 data events.'
            msg += '\n' + indent(yaml_dump(disk_events0[0]), ' disk_event ')
            raise Exception(msg)

        write_file(i, 'c-disk_event-consumed', yaml_dump(disk_events_consumed))
        write_file(i, 'd-evs', yaml_dump(evs))

        data_events.extend(evs)

        # interpret disk event
        for disk_event in disk_events_consumed:
            disk_event_interpret(disk_rep, disk_event)
        write_file(i, 'e-disk_rep-modified', disk_rep.tree())

        try:
            data_rep_by_translation = disk_map.interpret_hierarchy_(
                schema, disk_rep)
        except IncorrectFormat:
            # BUG FIX: traceback.format_exc() takes an optional *limit*
            # argument, not the exception object; passing the exception
            # was wrong.
            s = traceback.format_exc()
            logger.error('Failed check:\n%s' % s)
            write_file(i, 'f-disk_rep-modified-translated-to-data_rep-FAIL', s)
            data_rep_by_translation = None
        else:
            write_file(i, 'f-disk_rep-modified-translated-to-data_rep',
                       yaml_dump(data_rep_by_translation))

        # apply the translated data events to the incremental data_rep
        for data_event in evs:
            event_intepret(view_manager, data_rep, data_event)

        write_file(i, 'g-data_rep-with-evs-applied', yaml_dump(data_rep))

        # both routes must agree (compared via hash codes)
        if data_rep_by_translation is not None:
            h1 = data_hash_code(data_rep_by_translation)
            h2 = data_hash_code(data_rep)
            if h1 != h2:
                msg = 'Hash codes differ.'
                msg += '\n' + indent(yaml_dump(data_rep), 'data_rep ')
                msg += '\n' + indent(yaml_dump(data_rep_by_translation),
                                     'data_rep_by_tr ')
                raise Exception(msg)
        else:
            raise Exception()
        i += 1
    logger.info('test_inverse ok, written on %s' % out)
    return dict(data_rep0=data_rep0,
                data_events=data_events,
                data_rep=data_rep)
Exemple #19
0
#                 shutil.rmtree(where)
#             os.makedirs(where)
#             print('Writing data there.')
#             h2.to_disk(where)
#             with open(os.path.join(where+'.yaml'), 'w') as f:
#                 f.write(yaml.dump(shelves_data))
#

if __name__ == '__main__':
    # Load the user db from the directory given on the command line and
    # record all data events generated by the modifications below.
    user_db_view = read_as_user_db(sys.argv[1])
    events = []

    def notify_callback(event):
        events.append(event)

    user_db_view._notify_callback = notify_callback

    user = user_db_view.best_match(None, None, '*****@*****.**')
    print('user: %s' % user)
    user.info.email = 'new email'
    user.info.groups.append('group:new-group')
    # Function-style print for consistency with the call above
    # (and Python 3 compatibility).
    print(yaml_dump(events))

    # Translate each recorded data event into disk events and log them.
    for data_event in events:
        dm = DB.dm
        disk_event = disk_events_from_data_event(disk_map=dm,
                                                 schema=user_db_view._schema,
                                                 data_rep=user_db_view._data,
                                                 data_event=data_event)
        logger.info(yaml_dump(disk_event))
Exemple #20
0
def check_translation_gitrep_to_diskrep(repo, branch_name, out):
    """
    Walk the commits of *branch_name* in chronological order, derive
    disk events from each consecutive diff, apply them to an in-memory
    disk representation, and check it matches a fresh read of the
    working tree at each commit.

    Writes intermediate artifacts to the directory *out* (recreated).
    Returns dict(disk_rep0=..., disk_events=..., disk_rep=...).
    """
    wd = repo.working_tree_dir

    commits = list(reversed(list(repo.iter_commits(branch_name))))

    # make sure that commits[0] is the first
    for i in range(1, len(commits)):
        assert commits[i].parents[0] == commits[i - 1]
    # check out the first commit
    repo.head.reference = commits[0]
    repo.head.reset(index=True, working_tree=True)

    disk_rep0 = ProxyDirectory.from_disk(wd)
    disk_rep = deepcopy(disk_rep0)

    # start from a clean output directory
    if os.path.exists(out):
        shutil.rmtree(out)
    if not os.path.exists(out):
        os.makedirs(out)

    def write_file_(name, what):
        # write `what` to out/name, for debugging
        name = os.path.join(out, name)
        with open(name, 'w') as f:
            f.write(what)
        logger.info('wrote on %s' % name)

    def write_file(i, n, what):
        name = '%d-%s.txt' % (i, n)
        write_file_(name, what)

    logger.debug('Initial files: %s' %
                 list(_.path for _ in commits[1].tree.traverse()))

    msg = ""
    for i, commit in enumerate(commits):
        d = disk_rep_from_git_tree(commit.tree)
        msg += '\n\n' + indent(d.tree(), ' tree at commit #%d | ' % i)
    write_file_('00-commits.txt', msg)

    all_disk_events = []
    for i in range(1, len(commits)):
        write_file(i, 'a-disk_rep', disk_rep.tree())

        msg = ""
        for d in commits[i - 1].diff(commits[i]):
            msg += '\n' + str(d)
        write_file(i, 'c-diffs', msg)

        # derive disk events from the diff between consecutive commits
        events = diskevents_from_diff(commits[i - 1], commits[i])
        write_file(i, 'd-diskevents_from_diff', yaml_dump(events))

        for disk_event in events:
            disk_event_interpret(disk_rep, disk_event)
        all_disk_events.extend(events)

        write_file(i, 'e-disk_rep-after-diskevents', disk_rep.tree())

        # check out this commit and compare with our incremental state
        repo.head.reference = commits[i]
        repo.head.reset(index=True, working_tree=True)
        supposedly = ProxyDirectory.from_disk(wd)
        write_file(i, 'f-supposedly', supposedly.tree())

        assert_equal_disk_rep(disk_rep, supposedly)

    logger.debug('wd: %s' % wd)
    return dict(disk_rep0=disk_rep0,
                disk_events=all_disk_events,
                disk_rep=disk_rep)
Exemple #21
0
def test_view1a():
    """
    Exercise the view layer on a small user database: reads, writes,
    validation failures, dict operations (add/delete/rename), and
    replaying the recorded events to reproduce the final state.
    """

    db_schema = Schema()
    schema_user = Schema()
    schema_user.string('name')
    schema_user.string('email', can_be_none=True)
    schema_user.list('groups', SchemaString())
    db_schema.hash('users', schema_user)

    db0 = {
        'users': {
            'andrea': {
                'name': 'Andrea',
                'email': '*****@*****.**',
                'groups': ['group:admin', 'group:FDM'],
            },
            'pinco': {
                'name': 'Pinco Pallo',
                'email': None,
                'groups': ['group:FDM'],
            },
        }
    }

    db_schema.validate(db0)
    # Everyone may do everything, so ACLs don't interfere with the test.
    all_everything = ACLRule(MCDPConstants.ALLOW, MCDPConstants.EVERYONE,
                             MCDPConstants.Privileges.SPECIAL_ALL_WILDCARD)
    db_schema.add_acl_rules([all_everything])
    db = deepcopy(db0)

    class UserView(object):
        def get_complete_address(self):
            return '%s <%s>' % (self.name, self.email)

    viewmanager = ViewManager(db_schema)
    viewmanager.set_view_class(schema_user, UserView)
    actor = 'user:andrea'
    principals = ['user:andrea', MCDPConstants.EVERYONE]
    view = viewmanager.view(db, actor, principals)
    events = []

    def notify_callback(event):
        # record every change event emitted by the view
        logger.debug('\n' + yaml_dump(event))
        events.append(event)

    view._notify_callback = notify_callback
    users = view.users

    u = users['andrea']
    assert_equal(u.name, 'Andrea')
    u.name = 'not Andrea'
    print(yaml_dump(u._data))
    assert_equal(u.name, 'not Andrea')
    assert_equal(u.get_complete_address(),
                 'not Andrea <*****@*****.**>')
    # email is declared can_be_none, so setting None must succeed
    try:
        u.email = None
    # NOTE(review): bare `except:` also catches SystemExit and
    # KeyboardInterrupt; `except Exception` would be safer here.
    except:
        raise Exception('Should have been fine')

    assert_equal(u.email, None)

    # name is NOT nullable: setting None must raise NotValid
    try:
        u.name = None
    except NotValid as e:
        logger.info(e)
    else:
        raise Exception('Name set to None')

    assert u._data['name'] is not None, u._data

    users['another'] = {
        'name': 'Another',
        'email': '*****@*****.**',
        'groups': []
    }

    # no email

    try:
        users['another'] = {'name': 'Another'}
        raise Exception('Expected NotValid')
    except NotValid:
        pass

    assert 'another' in users
    del users['another']
    assert 'another' not in users

    for group in u.groups:
        print('%s is in group %s' % (u.name, group))

    assert_equal(list(u.groups), ['group:admin', 'group:FDM'])

    users.rename('pinco', 'pallo')
    all_users = set(users)
    logger.info(all_users)
    assert_equal(all_users, set(['pallo', 'andrea']))

    l('db', yaml_dump(db))

    # Replaying the recorded events on the original db must reproduce
    # the mutated db exactly.
    logger.info(yaml_dump(events))
    db2 = replay_events(viewmanager, db0, events)

    l('db2', yaml_dump(db2))
    assert_equal(db, db2)