Ejemplo n.º 1
0
 def __open_file(self, open_flags):
     # Open the backing file for this data block.  When reopened in
     # read-write mode ('r+') and the block already holds data, its
     # on-disk state is restored before any append happens.
     self.__f_obj = open(self.__path, open_flags)
     if 'r+' not in open_flags:
         return
     if self.get_actual_size():
         logger.debug('rewrite data block %s for appending...' % self.get_name())
         self.__restore_db()
Ejemplo n.º 2
0
 def _synchronize(self):
     """Flush the journal and push it to the fabnet gateway.

     Sets __is_sync on a successful send and clears __sync_failed; any
     error marks __sync_failed and is re-raised to the caller.
     """
     try:
         logger.debug("synchronizing journal...")
         self.__journal.flush()
         j_data = DataBlock(self.__journal_path, actsize=True)
         is_send = self.__fabnet_gateway.put(j_data, key=self.__journal_key)
         if is_send:
             self.__is_sync = True
         self.__sync_failed = False
     except Exception:
         self.__sync_failed = True
         # bare raise keeps the original traceback (the old
         # Python2-only 'except Exception, err' + 'raise err' lost it)
         raise
Ejemplo n.º 3
0
 def _synchronize(self):
     """Synchronize the journal to the fabnet gateway.

     On success __is_sync is set; __sync_failed records whether this
     attempt raised.  Failures are re-raised after being flagged.
     """
     try:
         logger.debug('synchronizing journal...')
         self.__journal.flush()
         j_data = DataBlock(self.__journal_path, actsize=True)
         is_send = self.__fabnet_gateway.put(j_data, key=self.__journal_key)
         if is_send:
             self.__is_sync = True
         self.__sync_failed = False
     except Exception:
         self.__sync_failed = True
         # re-raise with the original traceback intact; the previous
         # 'raise err' form (Python 2 only) discarded it
         raise
    def transfer_data_block(self, transaction_id, seek, size, data_block, foreign_name=None):
        """Attach *data_block* to its transaction and schedule the transfer.

        Local transactions need no network traffic; otherwise the block
        is recorded in the transaction log and queued for upload or
        download depending on the transaction direction.
        """
        logger.debug('data block %s (seek=%s, size=%s) is ready for transfer'%(data_block.get_name(), seek, size))
        tr = self.__get_transaction(transaction_id)
        tr.append_data_block(seek, size, data_block, foreign_name)
        if tr.is_local():
            return

        self.__tr_log_update(transaction_id, seek, size, data_block.get_name(), foreign_name)

        target_queue = self.__put_queue if tr.is_uploading() else self.__get_queue
        target_queue.put((tr, seek))
Ejemplo n.º 5
0
 def __init__(self, file_path, for_write=False):
     # Per-open state of a virtual file handle.
     self.__file_path = file_path
     self.__for_write = for_write
     self.__seek = 0                  # absolute position within the file
     self.__cur_data_block = None     # data block currently in use
     self.__cur_db_seek = 0           # position inside the current data block
     self.__transaction_id = None     # used for write()
     self.__transaction = None        # used for read()
     self.__unsync = False
     self.__closed = False
     self.__failed_flag = False
     self.__is_tmp_file = self.__tmp_file()
     mode = 'write' if for_write else 'read'
     logger.debug('opening file %s for %s...'%(file_path, mode))
Ejemplo n.º 6
0
    def __parse_tr_log(self):
        """Rebuild transaction state from the on-disk transaction log.

        Each line is '<transaction_id> <record_type> ...':
          ST - start record, creates the Transaction object
          US - status update
          UT - transfer update, per-seek [size, local_name, foreign_name]

        Returns:
            list of [Transaction, {seek: [size, local, foreign]}] pairs,
            ordered by transaction start time.
        """
        def parse_int(val):
            # the log writes the literal string 'None' for missing values
            if val in (None, 'None'):
                return None
            return int(val)

        def parse_str(val):
            if val in (None, 'None'):
                return None
            return val

        transactions = {}
        # 'with' closes the log even when a malformed line raises
        # (the previous code leaked the handle in that case)
        with open(self.__trlog_path, 'r') as tr_log:
            for line in tr_log:
                if not line:
                    break

                logger.debug('processing "%s"' % line.strip())
                parts = line.split()
                transaction_id = int(parts[0])
                r_type = parts[1]
                if r_type == 'ST':
                    transaction = Transaction(int(parts[2]),
                                              base64.b64decode(parts[3]),
                                              int(parts[4]), transaction_id)
                    transactions[transaction_id] = [transaction, {}]
                elif r_type == 'US':
                    transactions[transaction_id][0].change_status(int(parts[2]))
                elif r_type == 'UT':
                    seek = int(parts[2])
                    size = parse_int(parts[3])
                    local_name = parse_str(parts[4])
                    try:
                        # older records have no foreign-name column
                        foreign_name = parse_str(parts[5])
                    except IndexError:
                        foreign_name = None
                    cur_vals = transactions[transaction_id][1].get(
                        seek, [None, None, None])
                    if size:
                        cur_vals[0] = size
                    if local_name:
                        cur_vals[1] = local_name
                    if foreign_name:
                        cur_vals[2] = foreign_name
                    transactions[transaction_id][1][seek] = cur_vals

        # key= replaces the Python2-only cmp= form, identical ordering
        return sorted(transactions.values(),
                      key=lambda item: item[0].get_start_datetime())
Ejemplo n.º 7
0
 def __init__(self, file_path, for_write=False):
     """Initialize the handle state for one open of *file_path*."""
     self.__file_path = file_path
     self.__for_write = for_write
     # cursor state
     self.__seek = 0
     self.__cur_data_block = None
     self.__cur_db_seek = 0
     # transaction bookkeeping: id for write(), object for read()
     self.__transaction_id = None
     self.__transaction = None
     # lifecycle flags
     self.__unsync = False
     self.__closed = False
     self.__failed_flag = False
     self.__is_tmp_file = self.__tmp_file()
     logger.debug('opening file %s for %s...' %
                  (file_path, 'write' if for_write else 'read'))
Ejemplo n.º 8
0
    def iter(self, start_record_id=None):
        """Iterate journal records, yielding (record_id, operation_type, item_md).

        Reads the journal data block sequentially while holding JLock.
        Each record is a RECORD_STRUCT header (item dump length,
        operation type, record id) followed by the item dump; records
        appear to be padded out to BLOCK_SIZE boundaries -- TODO confirm.
        If start_record_id is given, records with id <= start_record_id
        are skipped, though __last_record_id still advances past them.

        Raises:
            RuntimeError: on an unknown operation type (corrupt journal).
        """
        JLock.lock()
        try:
            j_data = DataBlock(self.__journal_path, actsize=True)
            buf = ''
            while True:
                # top up the buffer until a full header is available;
                # an empty read means end of journal
                if len(buf) < self.RECORD_STRUCT_SIZE:
                    buf += j_data.read(1024)
                    #logger.debug('J_ITER: buf=%s'%buf.encode('hex').upper())
                    if not buf:
                        break

                #logger.debug('J_ITER: header=%s'%buf[:self.RECORD_STRUCT_SIZE].encode('hex').upper())
                item_dump_len, operation_type, record_id = struct.unpack(
                    self.RECORD_STRUCT, buf[:self.RECORD_STRUCT_SIZE])
                #logger.debug('J_ITER: buf_len=%s, item_dump_len=%s, operation_type=%s, record_id=%s'%(len(buf), item_dump_len, operation_type, record_id))
                if operation_type not in (self.OT_APPEND, self.OT_UPDATE,
                                          self.OT_REMOVE):
                    #logger.debug('J_ITER: buf=%s'%buf.encode('hex').upper())
                    raise RuntimeError(
                        'Invalid journal!!! Unknown operation type: %s' %
                        operation_type)

                # NOTE(review): a dump longer than ~1024 bytes may still be
                # incomplete after this single extra read -- verify
                if len(buf) < (self.RECORD_STRUCT_SIZE + item_dump_len):
                    buf += j_data.read(1024)

                item_dump = buf[self.
                                RECORD_STRUCT_SIZE:self.RECORD_STRUCT_SIZE +
                                item_dump_len]

                remaining_len = BLOCK_SIZE - self.RECORD_STRUCT_SIZE - item_dump_len
                to_pad_len = remaining_len % BLOCK_SIZE
                #logger.debug('J_ITER: record=%s'%buf[:self.RECORD_STRUCT_SIZE+item_dump_len+to_pad_len].encode('hex').upper())
                # drop the consumed record (header + dump + padding)
                buf = buf[self.RECORD_STRUCT_SIZE + item_dump_len +
                          to_pad_len:]

                self.__last_record_id = record_id
                if (start_record_id is None) or (record_id > start_record_id):
                    if operation_type == self.OT_REMOVE:
                        # remove records carry only a 4-byte item id
                        item_md = struct.unpack('<I', item_dump)[0]
                    else:
                        item_md = AbstractMetadataObject.load_md(item_dump)
                    logger.debug(
                        'J_ITER: record_id=%s, operation_type=%s, item_md=%s' %
                        (record_id, operation_type, item_md))
                    yield record_id, operation_type, item_md
        finally:
            JLock.unlock()
Ejemplo n.º 9
0
    def run(self):
        """Worker loop: download data blocks for queued (transaction, seek) jobs.

        Terminates on a QUIT_JOB sentinel or when stop_flag is set.  On a
        failed job the transaction is marked failed and the partial data
        block removed; errors during that cleanup are logged but never
        propagate, so the worker thread stays alive.
        """
        while True:
            # (removed unused 'out_streem = data = None' locals)
            job = self.queue.get()
            data_block = None
            transaction = None
            seek = None
            try:
                if job == QUIT_JOB or self.stop_flag.is_set():
                    break

                transaction, seek = job

                data_block, _, foreign_name = transaction.get_data_block(
                    seek, noclone=False)
                if not foreign_name:
                    raise Exception('foreign name does not found for seek=%s' %
                                    seek)

                if transaction.is_failed():
                    logger.debug(
                        'Transaction {%s} is failed! Skipping data block downloading...'
                        % transaction.get_id())
                    data_block.remove()
                    continue

                self.fabnet_gateway.get(foreign_name,
                                        transaction.get_replica_count(),
                                        data_block)
                data_block.close()

                self.transactions_manager.update_transaction(
                    transaction.get_id(), seek,
                    is_failed=False, foreign_name=data_block.get_name())
            except Exception as err:
                events_provider.error('GetWorker',
                                      '%s failed: %s' % (transaction, err))
                logger.traceback_debug()
                try:
                    if transaction and data_block:
                        self.transactions_manager.update_transaction(
                            transaction.get_id(), seek,
                            is_failed=True, foreign_name=data_block.get_name())
                        data_block.remove()
                except Exception as cleanup_err:
                    # distinct name: the old code rebound 'err', shadowing
                    # the original failure
                    logger.error('[GetWorker.__on_error] %s' % cleanup_err)
                    logger.traceback_debug()
Ejemplo n.º 10
0
    def mkdir(self, path, recursive=False):
        """Create directory *path*; with recursive=True create missing parents."""
        path = to_nimbus_path(path)
        logger.debug('mkdir %s ...'%path)
        mdf = self.metadata
        if mdf.exists(path):
            raise AlreadyExistsException('Directory "%s" is already exists!'%path)

        base_path, new_dir = os.path.split(path)

        # ensure the parent exists (creating it when allowed)
        if not mdf.exists(base_path):
            if not recursive:
                raise PathException('Directory "%s" does not exists!'%base_path)
            self.mkdir(base_path, recursive)

        mdf.append(base_path, DirectoryMD(name=new_dir))
    def __parse_tr_log(self):
        """Parse the transaction log and reconstruct pending transactions.

        Record types per line ('<id> <type> ...'):
          ST - start: build the Transaction object
          US - update status
          UT - update transfer slot [size, local_name, foreign_name] at seek

        Returns:
            [Transaction, {seek: [size, local, foreign]}] pairs sorted by
            transaction start time.
        """
        def parse_int(val):
            # missing values are serialized as the literal string 'None'
            if val in (None, 'None'):
                return None
            return int(val)

        def parse_str(val):
            if val in (None, 'None'):
                return None
            return val

        transactions = {}
        # context manager closes the log even on a parse error
        # (previously the handle leaked when a line was malformed)
        with open(self.__trlog_path, 'r') as tr_log:
            for line in tr_log:
                if not line:
                    break

                logger.debug('processing "%s"'%line.strip())
                parts = line.split()
                transaction_id = int(parts[0])
                r_type = parts[1]
                if r_type == 'ST':
                    transaction = Transaction(int(parts[2]), base64.b64decode(parts[3]), int(parts[4]), transaction_id)
                    transactions[transaction_id] = [transaction, {}]
                elif r_type == 'US':
                    transactions[transaction_id][0].change_status(int(parts[2]))
                elif r_type == 'UT':
                    seek = int(parts[2])
                    size = parse_int(parts[3])
                    local_name = parse_str(parts[4])
                    try:
                        # old-format records lack the foreign-name column
                        foreign_name = parse_str(parts[5])
                    except IndexError:
                        foreign_name = None
                    cur_vals = transactions[transaction_id][1].get(seek, [None, None, None])
                    if size:
                        cur_vals[0] = size
                    if local_name:
                        cur_vals[1] = local_name
                    if foreign_name:
                        cur_vals[2] = foreign_name
                    transactions[transaction_id][1][seek] = cur_vals

        # key= replaces the Python2-only sorted(cmp=...) with the same order
        return sorted(transactions.values(),
                      key=lambda item: item[0].get_start_datetime())
Ejemplo n.º 12
0
    def mkdir(self, path, recursive=False):
        """Create a new directory entry in the metadata tree.

        With recursive=True missing parents are created as well;
        otherwise a missing parent raises PathException.
        """
        path = to_nimbus_path(path)
        logger.debug('mkdir %s ...' % path)
        md = self.metadata
        if md.exists(path):
            raise AlreadyExistsException('Directory "%s" is already exists!' %
                                         path)

        base_path, new_dir = os.path.split(path)

        parent_missing = not md.exists(base_path)
        if parent_missing and recursive:
            self.mkdir(base_path, recursive)
        elif parent_missing:
            raise PathException('Directory "%s" does not exists!' %
                                base_path)

        new_dir_obj = DirectoryMD(name=new_dir)
        md.append(base_path, new_dir_obj)
Ejemplo n.º 13
0
    def iter(self, start_record_id=None):
        """Yield (record_id, operation_type, item_md) for each journal record.

        Scans the journal data block sequentially under JLock.  A record
        is a RECORD_STRUCT header (dump length, operation type, record id)
        plus the item dump; records appear to be padded to BLOCK_SIZE
        boundaries -- TODO confirm.  When start_record_id is supplied,
        records with id <= start_record_id are skipped (but still update
        __last_record_id).

        Raises:
            RuntimeError: if an unknown operation type is read.
        """
        JLock.lock()
        try:
            j_data = DataBlock(self.__journal_path, actsize=True)
            buf = ""
            while True:
                # refill until a full header is buffered; empty read = EOF
                if len(buf) < self.RECORD_STRUCT_SIZE:
                    buf += j_data.read(1024)
                    # logger.debug('J_ITER: buf=%s'%buf.encode('hex').upper())
                    if not buf:
                        break

                # logger.debug('J_ITER: header=%s'%buf[:self.RECORD_STRUCT_SIZE].encode('hex').upper())
                item_dump_len, operation_type, record_id = struct.unpack(
                    self.RECORD_STRUCT, buf[: self.RECORD_STRUCT_SIZE]
                )
                # logger.debug('J_ITER: buf_len=%s, item_dump_len=%s, operation_type=%s, record_id=%s'%(len(buf), item_dump_len, operation_type, record_id))
                if operation_type not in (self.OT_APPEND, self.OT_UPDATE, self.OT_REMOVE):
                    # logger.debug('J_ITER: buf=%s'%buf.encode('hex').upper())
                    raise RuntimeError("Invalid journal!!! Unknown operation type: %s" % operation_type)

                # NOTE(review): one extra 1024-byte read may not cover a
                # dump longer than ~1024 bytes -- verify
                if len(buf) < (self.RECORD_STRUCT_SIZE + item_dump_len):
                    buf += j_data.read(1024)

                item_dump = buf[self.RECORD_STRUCT_SIZE : self.RECORD_STRUCT_SIZE + item_dump_len]

                remaining_len = BLOCK_SIZE - self.RECORD_STRUCT_SIZE - item_dump_len
                to_pad_len = remaining_len % BLOCK_SIZE
                # logger.debug('J_ITER: record=%s'%buf[:self.RECORD_STRUCT_SIZE+item_dump_len+to_pad_len].encode('hex').upper())
                # consume header + dump + padding from the buffer
                buf = buf[self.RECORD_STRUCT_SIZE + item_dump_len + to_pad_len :]

                self.__last_record_id = record_id
                if (start_record_id is None) or (record_id > start_record_id):
                    if operation_type == self.OT_REMOVE:
                        # remove records store only a 4-byte item id
                        item_md = struct.unpack("<I", item_dump)[0]
                    else:
                        item_md = AbstractMetadataObject.load_md(item_dump)
                    logger.debug(
                        "J_ITER: record_id=%s, operation_type=%s, item_md=%s" % (record_id, operation_type, item_md)
                    )
                    yield record_id, operation_type, item_md
        finally:
            JLock.unlock()
Ejemplo n.º 14
0
    def move(self, s_path, d_path):
        """Move or rename *s_path* to *d_path* in the metadata tree.

        Moving onto an existing directory re-parents the source; moving
        onto an existing file is an error; otherwise this is a rename
        into the destination's parent directory.
        """
        s_path = to_nimbus_path(s_path)
        d_path = to_nimbus_path(d_path)
        logger.debug('moving %s to %s ...'%(s_path, d_path))

        mdf = self.metadata
        source = mdf.find(s_path)
        if not mdf.exists(d_path):
            dst_path, new_name = os.path.split(d_path)
            source.name = new_name
            mdf.find(dst_path) #check existance
        else:
            d_obj = mdf.find(d_path)
            if d_obj.is_file():
                raise AlreadyExistsException('File %s is already exists!'%d_path)
            source.parent_dir_id = d_obj.item_id

        mdf.update(source)
        logger.debug('%s is moved to %s!'%(s_path, d_path))
Ejemplo n.º 15
0
    def transfer_data_block(self, transaction_id, seek, size, data_block, foreign_name=None):
        """Register *data_block* with its transaction and queue the transfer.

        Nothing is queued for local transactions.  Uploading transactions
        feed the put queue, downloading ones the get queue.
        """
        logger.debug('data block %s (seek=%s, size=%s) is ready for transfer' %
                     (data_block.get_name(), seek, size))
        tr = self.__get_transaction(transaction_id)
        tr.append_data_block(seek, size, data_block, foreign_name)
        if tr.is_local():
            return

        self.__tr_log_update(transaction_id, seek, size, data_block.get_name(),
                             foreign_name)

        queue = self.__put_queue if tr.is_uploading() else self.__get_queue
        queue.put((tr, seek))
Ejemplo n.º 16
0
    def move(self, s_path, d_path):
        """Relocate *s_path* to *d_path* within the metadata tree."""
        s_path = to_nimbus_path(s_path)
        d_path = to_nimbus_path(d_path)
        logger.debug('moving %s to %s ...' % (s_path, d_path))

        md = self.metadata
        source = md.find(s_path)
        destination_exists = md.exists(d_path)
        if destination_exists:
            # destination must be a directory: re-parent the source into it
            d_obj = md.find(d_path)
            if d_obj.is_file():
                raise AlreadyExistsException('File %s is already exists!' %
                                             d_path)
            source.parent_dir_id = d_obj.item_id
        else:
            # rename within the destination's parent directory
            dst_path, new_name = os.path.split(d_path)
            source.name = new_name
            md.find(dst_path)  #check existance

        md.update(source)
        logger.debug('%s is moved to %s!' % (s_path, d_path))
Ejemplo n.º 17
0
    def rmdir(self, path, recursive=False):
        """Remove directory *path*; with recursive=True delete its contents too."""
        path = to_nimbus_path(path)
        logger.debug('rmdir %s ...' % path)
        mdf = self.metadata

        dir_obj = mdf.find(path)
        if not dir_obj.is_dir():
            raise NotDirectoryException('%s is a file!' % path)

        children = mdf.listdir(path)
        if children and not recursive:
            raise NotEmptyException('Directory "%s" is not empty!' % path)

        # depth-first removal of the directory contents
        for child in children:
            child_path = '%s/%s' % (path, child.name)
            if child.is_file():
                self.remove_file(child_path)
            else:
                self.rmdir(child_path, recursive)

        mdf.remove(dir_obj)
Ejemplo n.º 18
0
    def rmdir(self, path, recursive=False):
        """Delete a directory from the metadata tree.

        Non-empty directories are only removed when recursive=True, in
        which case files and subdirectories are deleted depth-first.
        """
        path = to_nimbus_path(path)
        logger.debug('rmdir %s ...'%path)
        md = self.metadata

        dir_obj = md.find(path)
        if not dir_obj.is_dir():
            raise NotDirectoryException('%s is a file!'%path)

        entries = md.listdir(path)
        if entries and not recursive:
            raise NotEmptyException('Directory "%s" is not empty!'%path)

        for entry in entries:
            entry_path = '%s/%s'%(path, entry.name)
            if entry.is_file():
                self.remove_file(entry_path)
            else:
                self.rmdir(entry_path, recursive)

        md.remove(dir_obj)
Ejemplo n.º 19
0
    def run(self):
        """Download worker loop consuming (transaction, seek) jobs.

        Exits on the QUIT_JOB sentinel or when stop_flag is set.  A
        failing job marks the transaction failed and removes the partial
        data block; cleanup errors are logged without propagating so the
        worker keeps running.
        """
        while True:
            # (dropped the unused 'out_streem = data = None' assignments)
            job = self.queue.get()
            data_block = None
            transaction = None
            seek = None
            try:
                if job == QUIT_JOB or self.stop_flag.is_set():
                    break

                transaction, seek = job

                data_block, _, foreign_name = transaction.get_data_block(seek, noclone=False)
                if not foreign_name:
                    raise Exception('foreign name does not found for seek=%s'%seek)

                if transaction.is_failed():
                    logger.debug('Transaction {%s} is failed! Skipping data block downloading...'%transaction.get_id())
                    data_block.remove()
                    continue

                self.fabnet_gateway.get(foreign_name, transaction.get_replica_count(), data_block)
                data_block.close()

                self.transactions_manager.update_transaction(
                    transaction.get_id(), seek,
                    is_failed=False, foreign_name=data_block.get_name())
            except Exception as err:
                events_provider.error('GetWorker','%s failed: %s'%(transaction, err))
                logger.traceback_debug()
                try:
                    if transaction and data_block:
                        self.transactions_manager.update_transaction(
                            transaction.get_id(), seek,
                            is_failed=True, foreign_name=data_block.get_name())
                        data_block.remove()
                except Exception as cleanup_err:
                    # separate name: the old code rebound 'err' and
                    # shadowed the original failure
                    logger.error('[GetWorker.__on_error] %s'%cleanup_err)
                    logger.traceback_debug()
Ejemplo n.º 20
0
 def __open_file(self, open_flags):
     # Open the data block's backing file; a 'r+' reopen of a block
     # that already contains data restores it before appending.
     self.__f_obj = open(self.__path, open_flags)
     reopened_for_append = 'r+' in open_flags
     if reopened_for_append and self.get_actual_size():
         logger.debug('rewrite data block %s for appending...'%self.get_name())
         self.__restore_db()
Ejemplo n.º 21
0
                    logger.traceback_debug()            
                    raise err

                if self.__is_tmp_file:
                    status = Transaction.TS_FINISHED
                else:
                    status = Transaction.TS_LOCAL_SAVED

                if self.__transaction_id:
                    self.TRANSACTIONS_MANAGER.update_transaction_state(self.__transaction_id, status)
            else:
                if self.__cur_data_block:
                    self.__cur_data_block.close()
        finally:
            self.__closed = True
            logger.debug('file %s is closed!'%self.__file_path)
            
    def __send_data_block(self):
        """Finalize the current data block, hand it to the transactions
        manager when non-empty, then reset the per-block cursor state."""
        db = self.__cur_data_block
        db.finalize()
        if db.get_actual_size():
            self.TRANSACTIONS_MANAGER.transfer_data_block(
                self.__transaction_id, self.__seek, self.__cur_db_seek, db)

        # advance the file cursor past the flushed block and clear state
        self.__seek += self.__cur_db_seek
        self.__cur_db_seek = 0
        self.__cur_data_block = None
        self.__unsync = False

    def __failed_transaction(self, err):
        self.__failed_flag = True
        events_provider.critical("File", "File %s IO error: %s"%\
Ejemplo n.º 22
0
 def remove_file(self, file_path):
     """Delete the file at *file_path* via the transactions manager."""
     nimbus_path = to_nimbus_path(file_path)
     logger.debug('removing file %s ...'%nimbus_path)
     self.transactions_manager.remove_file(nimbus_path)
     logger.debug('file %s is removed!'%nimbus_path)
Ejemplo n.º 23
0
 def remove_file(self, file_path):
     """Remove a file, delegating the deletion to the transactions manager."""
     path = to_nimbus_path(file_path)
     logger.debug('removing file %s ...' % path)
     self.transactions_manager.remove_file(path)
     logger.debug('file %s is removed!' % path)
Ejemplo n.º 24
0
                    raise err

                if self.__is_tmp_file:
                    status = Transaction.TS_FINISHED
                else:
                    status = Transaction.TS_LOCAL_SAVED

                if self.__transaction_id:
                    self.TRANSACTIONS_MANAGER.update_transaction_state(
                        self.__transaction_id, status)
            else:
                if self.__cur_data_block:
                    self.__cur_data_block.close()
        finally:
            self.__closed = True
            logger.debug('file %s is closed!' % self.__file_path)

    def __send_data_block(self):
        """Flush the in-progress data block to the transactions manager
        (skipping empty blocks) and reset block-level bookkeeping."""
        self.__cur_data_block.finalize()
        has_payload = bool(self.__cur_data_block.get_actual_size())
        if has_payload:
            self.TRANSACTIONS_MANAGER.transfer_data_block(
                self.__transaction_id, self.__seek,
                self.__cur_db_seek, self.__cur_data_block)

        self.__seek += self.__cur_db_seek
        self.__cur_db_seek = 0
        self.__cur_data_block = None
        self.__unsync = False

    def __failed_transaction(self, err):
        self.__failed_flag = True
        events_provider.critical("File", "File %s IO error: %s"%\