Example #1
def WriteIndex(filepath=None):
    """
    Write the index database to the local file .bitdust/metadata/index.
    """
    global _LoadingFlag
    if _LoadingFlag:
        return
    if filepath is None:
        filepath = settings.BackupIndexFilePath()
    src = '%d\n' % revision()
    src += backup_fs.Serialize(to_json=True)
    return bpio.AtomicWriteFile(filepath, src)
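
Example #1 relies on bpio.AtomicWriteFile so that a crash mid-write can never leave a truncated index on disk. The helper below is only a generic sketch of the usual write-to-temp-then-rename pattern behind such a call; it is an illustration, not the actual bpio implementation.

import os
import tempfile

def atomic_write_file_sketch(filepath, data):
    # Write into a temporary file in the same directory, flush it to disk,
    # then rename it over the target so readers never observe a partial file.
    dirpath = os.path.dirname(os.path.abspath(filepath))
    fd, tmppath = tempfile.mkstemp(dir=dirpath)
    try:
        with os.fdopen(fd, 'w') as fout:
            fout.write(data)
            fout.flush()
            os.fsync(fout.fileno())
        os.rename(tmppath, filepath)  # atomic on POSIX for same-filesystem paths
    except Exception:
        if os.path.exists(tmppath):
            os.remove(tmppath)
        return False
    return True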
Example #2
def backup_done(bid, result):
    from crypt import signed
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
    except:
        pass  # the destination directory may already exist
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(),
                                            bid)):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
        payld = str(bpio.ReadBinaryFile(filepath))
        outpacket = signed.Packet('Data', my_id.getLocalID(),
                                  my_id.getLocalID(), filename, payld,
                                  'http://megafaq.ru/cvps1010.xml')
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out',
                                   filename)
        bpio.AtomicWriteFile(newfilepath, outpacket.Serialize())
    # Assume we delivered all pieces from ".out" to the suppliers and lost the original data;
    # then we requested the data back and received it into ".inp".
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.inp'))
    except:
        pass  # the destination directory may already exist
    for filename in os.listdir(
            os.path.join(settings.getLocalBackupsDir(), bid + '.out')):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out',
                                filename)
        data = bpio.ReadBinaryFile(filepath)
        inppacket = signed.Unserialize(data)
        assert inppacket.Valid()
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.inp',
                                   filename)
        bpio.AtomicWriteFile(newfilepath, inppacket.Payload)
    # Now do restore from input data
    backupID = bid + '.inp'
    outfd, tarfilename = tmpfile.make('restore', '.tar.gz',
                                      backupID.replace('/', '_') + '_')
    r = restore.restore(backupID, outfd)
    r.MyDeferred.addBoth(restore_done, tarfilename)
    reactor.callLater(1, r.automat, 'init')
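
The snippet above hands the temporary tar file name to a restore_done callback that is not included in this listing. A minimal hypothetical stand-in (an assumption, not the project's actual handler) could simply log the outcome and stop the reactor:

def restore_done(result, tarfilename):
    # Hypothetical callback for the Deferred above: report the restore result
    # and the .tar.gz file produced, then shut the reactor down.
    lg.out(2, 'restore finished: result=%r, output written to %s' % (result, tarfilename))
    reactor.stop()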
Example #3
def init(json_rpc_port=None):
    global _JsonRPCServer
    lg.out(4, 'jsonrpc_server.init')
    if _JsonRPCServer:
        lg.warn('already started')
        return
    from main import settings
    from system import bpio
    if not json_rpc_port:
        json_rpc_port = settings.getJsonRPCServerPort()
    bpio.AtomicWriteFile(settings.LocalJsonRPCPortFilename(), str(json_rpc_port))
    # TODO: add protection: accept connections only from local host: 127.0.0.1
    _JsonRPCServer = reactor.listenTCP(json_rpc_port, server.Site(BitDustJsonRPCServer()))
    lg.out(4, '    started on port %d' % json_rpc_port)
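
The port number is persisted with bpio.AtomicWriteFile so that local clients can discover where the JSON-RPC server listens. A sketch of the reading side, assuming bpio.ReadTextFile returns the file contents as a string (as in Example #7), might look like this:

def read_local_jsonrpc_port():
    # Read the port file written above; fall back to the configured default
    # if the file is missing or does not contain a number.
    src = bpio.ReadTextFile(settings.LocalJsonRPCPortFilename())
    try:
        return int(src.strip())
    except (AttributeError, ValueError):
        return settings.getJsonRPCServerPort()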
Example #4
def _bk_done(bid, result):
    from crypt import signed
    customer, remotePath = packetid.SplitPacketID(bid)
    try:
        os.mkdir(
            os.path.join(settings.getLocalBackupsDir(), customer,
                         remotePath + '.out'))
    except:
        pass  # the destination directory may already exist
    for filename in os.listdir(
            os.path.join(settings.getLocalBackupsDir(), customer,
                         remotePath)):
        filepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                remotePath, filename)
        payld = str(bpio.ReadBinaryFile(filepath))
        newpacket = signed.Packet('Data', my_id.getLocalID(),
                                  my_id.getLocalID(), filename, payld,
                                  'http://megafaq.ru/cvps1010.xml')
        newfilepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                   remotePath + '.out', filename)
        bpio.AtomicWriteFile(newfilepath, newpacket.Serialize())
    reactor.stop()
Example #5
def init(ftp_port=None):
    global _FTPServer
    lg.out(4, 'ftp_server.init')
    if _FTPServer:
        lg.warn('already started')
        return
    if not ftp_port:
        ftp_port = settings.getFTPServerPort()
    if not os.path.isfile(settings.FTPServerCredentialsFile()):
        bpio.AtomicWriteFile(settings.FTPServerCredentialsFile(), 'bitdust:bitdust')
    # TODO: add protection: accept connections only from local host: 127.0.0.1
    _FTPServer = reactor.listenTCP(
        ftp_port,
        BitDustFTPFactory(
            Portal(
                FTPRealm('./'), [
                    AllowAnonymousAccess(),
                    FilePasswordDB(settings.FTPServerCredentialsFile()),
                ]
            ),
        )
    )
    lg.out(4, '    started on port %d' % ftp_port)
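
The default credentials written here follow the plain 'user:password' per-line format that Twisted's FilePasswordDB parses. Replacing them with several accounts is just another atomic write; the helper and the account dict below are made up for illustration:

def write_ftp_credentials(accounts):
    # accounts is a dict such as {'alice': 'secret1', 'bob': 'secret2'};
    # one 'user:password' line is written per account.
    src = '\n'.join('%s:%s' % (user, password) for user, password in accounts.items())
    return bpio.AtomicWriteFile(settings.FTPServerCredentialsFile(), src)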
Example #6
def WriteIndex(filepath=None, encoding='utf-8'):
    """
    Write the index database to the local file .bitdust/metadata/index.
    """
    global _LoadingFlag
    if _LoadingFlag:
        return
    if filepath is None:
        filepath = settings.BackupIndexFilePath()
    json_data = {}
    # json_data = backup_fs.Serialize(to_json=True, encoding=encoding)
    for customer_idurl in backup_fs.known_customers():
        customer_id = global_id.UrlToGlobalID(customer_idurl)
        json_data[customer_id] = backup_fs.Serialize(
            iterID=backup_fs.fsID(customer_idurl),
            to_json=True,
            encoding=encoding,
        )
    src = '%d\n' % revision()
    src += json.dumps(json_data, indent=2, encoding=encoding)
    if _Debug:
        import pprint
        lg.out(_DebugLevel, pprint.pformat(json_data))
    return bpio.AtomicWriteFile(filepath, src)
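
Reading the index back mirrors the structure written here: the first line carries the revision number and the remainder is the JSON document keyed by customer ID. A simplified parsing sketch (the project's actual reader may differ) is:

def parse_index_file(filepath):
    # Split the leading revision line from the JSON body produced by WriteIndex().
    src = bpio.ReadTextFile(filepath)
    revision_line, _, body = src.partition('\n')
    return int(revision_line), json.loads(body)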
Example #7
def rewrite_indexes(db_instance, source_db_instance):
    """
    """
    if _Debug:
        lg.out(_DebugLevel, 'coins_db.rewrite_indexes')
    source_location = os.path.join(source_db_instance.path, '_indexes')
    source_indexes = os.listdir(source_location)
    existing_location = os.path.join(db_instance.path, '_indexes')
    existing_indexes = os.listdir(existing_location)
    for existing_index_file in existing_indexes:
        if existing_index_file != '00id.py':
            index_name = existing_index_file[2:existing_index_file.index('.')]
            existing_index_path = os.path.join(existing_location,
                                               existing_index_file)
            os.remove(existing_index_path)
            if _Debug:
                lg.out(_DebugLevel,
                       '        removed index at %s' % existing_index_path)
            buck_path = os.path.join(db_instance.path, index_name + '_buck')
            if os.path.isfile(buck_path):
                os.remove(buck_path)
                if _Debug:
                    lg.out(_DebugLevel,
                           '            also bucket at %s' % buck_path)
            stor_path = os.path.join(db_instance.path, index_name + '_stor')
            if os.path.isfile(stor_path):
                os.remove(stor_path)
                if _Debug:
                    lg.out(_DebugLevel,
                           '            also storage at %s' % stor_path)
    for source_index_file in source_indexes:
        if source_index_file != '00id.py':
            index_name = source_index_file[2:source_index_file.index('.')]
            destination_index_path = os.path.join(existing_location,
                                                  source_index_file)
            source_index_path = os.path.join(source_location,
                                             source_index_file)
            if not bpio.AtomicWriteFile(destination_index_path,
                                        bpio.ReadTextFile(source_index_path)):
                lg.warn('failed writing index to %s' % destination_index_path)
                continue
            destination_buck_path = os.path.join(db_instance.path,
                                                 index_name + '_buck')
            source_buck_path = os.path.join(source_db_instance.path,
                                            index_name + '_buck')
            if not bpio.AtomicWriteFile(destination_buck_path,
                                        bpio.ReadBinaryFile(source_buck_path)):
                lg.warn('failed writing index bucket to %s' %
                        destination_buck_path)
                continue
            destination_stor_path = os.path.join(db_instance.path,
                                                 index_name + '_stor')
            source_stor_path = os.path.join(source_db_instance.path,
                                            index_name + '_stor')
            if not bpio.AtomicWriteFile(destination_stor_path,
                                        bpio.ReadBinaryFile(source_stor_path)):
                lg.warn('failed writing index storage to %s' %
                        destination_stor_path)
                continue
            if _Debug:
                lg.out(
                    _DebugLevel, '        wrote index %s from %s' %
                    (index_name, source_index_path))
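
A possible calling sequence for Example #7, assuming the CodernityDB Database objects expose close() and open() (which is not shown in this listing): rewrite the index files while both databases are available, then reopen the destination so the freshly copied index code is actually loaded.

def refresh_indexes(db_instance, source_db_instance):
    # Hypothetical usage sketch, not taken from the project sources:
    # copy index definitions plus their bucket/storage files, then reopen
    # the destination database so the new indexes take effect.
    rewrite_indexes(db_instance, source_db_instance)
    db_instance.close()
    db_instance.open()
    return db_instance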