Example #1
 def storage_get_metadata(self, tracker_client, store_serv,
                          remote_file_name):
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     remote_filename_len = len(remote_file_name)
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
     th.cmd = STORAGE_PROTO_CMD_GET_METADATA
     try:
         th.send_header(store_conn)
         # meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
         meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN,
                                  remote_filename_len)
         send_buffer = struct.pack(meta_fmt, store_serv.group_name,
                                   remote_file_name.encode())
         tcp_send_data(store_conn, send_buffer)
         th.recv_header(store_conn)
         # if th.status == 2:
         #    raise DataError('[-] Error: Remote file %s has no meta data.'
         #                    % (store_serv.group_name + __os_sep__.encode() + remote_file_name))
         if th.status != 0:
             raise DataError('[-] Error:%d, %s' %
                             (th.status, os.strerror(th.status)))
         if th.pkg_len == 0:
             # No metadata attached to this file.
             return {}
         meta_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dict = fdfs_unpack_metadata(meta_buffer)
     return ret_dict
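The metadata buffer returned above is decoded by fdfs_unpack_metadata. For reference, FastDFS separates metadata records with '\x01' and splits each key/value pair with '\x02'; the sketch below illustrates that decoding under those assumptions and is not necessarily the library's exact implementation.

# Hedged sketch of metadata (de)serialisation per the FastDFS wire format:
# records are separated by b'\x01', key and value by b'\x02'. Names are
# illustrative; the library's fdfs_unpack_metadata may differ in detail.
FDFS_RECORD_SEPARATOR = b'\x01'
FDFS_FIELD_SEPARATOR = b'\x02'

def unpack_metadata_sketch(meta_buffer: bytes) -> dict:
    if not meta_buffer:
        return {}
    records = meta_buffer.split(FDFS_RECORD_SEPARATOR)
    return dict(record.split(FDFS_FIELD_SEPARATOR, 1) for record in records)

def pack_metadata_sketch(meta_dict: dict) -> bytes:
    return FDFS_RECORD_SEPARATOR.join(
        key + FDFS_FIELD_SEPARATOR + value for key, value in meta_dict.items())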
Example #2
 def regenerate_appender_filename(self, appender_filename: bytes):
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = STORAGE_PROTO_CMD_REGENERATE_APPENDER_FILENAME
     appender_filename_len = len(appender_filename)
     # th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
     th.pkg_len = appender_filename_len
     try:
         th.send_header(store_conn)
         # body_fmt: |-appender_filename(appender_filename_len)-|
         body_fmt = '!%ds' % appender_filename_len
         send_buffer = struct.pack(body_fmt, appender_filename)
         print('send_buffer', send_buffer)
         tcp_send_data(store_conn, send_buffer)
         th.recv_header(store_conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
         response, recv_size = tcp_recv_response(store_conn, th.pkg_len)
         print('regenerate response', response)
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dict = dict()
     ret_dict['Status'] = 'Regenerate appender name ok'
     ret_dict['Storage IP'] = store_conn.remote_addr
     ret_dict['Remote file_id'] = __os_sep__.join(parse_file_id(response))
     return ret_dict
Example #3
 def storage_delete_file(self, tracker_client, store_serv, remote_filename):
     '''
     Delete file from storage server.
     '''
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = STORAGE_PROTO_CMD_DELETE_FILE
     file_name_len = len(remote_filename)
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
     try:
         th.send_header(store_conn)
         # del_fmt: |-group_name(16)-filename(len)-|
         del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
         send_buffer = struct.pack(del_fmt, store_serv.group_name,
                                   remote_filename)
         tcp_send_data(store_conn, send_buffer)
         th.recv_header(store_conn)
         # if th.status == 2:
         #    raise DataError('[-] Error: remote file %s is not exist.'
         #                    % (store_serv.group_name + __os_sep__.encode() + remote_filename))
         if th.status != 0:
             raise DataError('Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
             # recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
     except:
         raise
     finally:
         self.pool.release(store_conn)
     remote_filename = (store_serv.group_name + __os_sep__.encode()
                        + remote_filename)
     return ('Delete file successed.', remote_filename, store_serv.ip_addr)
Example #4
 def _storage_do_modify_file(self, tracker_client, store_serv, upload_type,
                             filebuffer, offset, filesize,
                             appender_filename):
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
     appender_filename_len = len(appender_filename)
     th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
     try:
         th.send_header(store_conn)
         # modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
         modify_fmt = '!Q Q Q %ds' % appender_filename_len
         send_buffer = struct.pack(modify_fmt, appender_filename_len,
                                   offset, filesize, appender_filename)
         tcp_send_data(store_conn, send_buffer)
         if upload_type == FDFS_UPLOAD_BY_FILENAME:
             upload_size = tcp_send_file(store_conn, filebuffer)
         elif upload_type == FDFS_UPLOAD_BY_BUFFER:
             tcp_send_data(store_conn, filebuffer)
         elif upload_type == FDFS_UPLOAD_BY_FILE:
             upload_size = tcp_send_file_ex(store_conn, filebuffer)
         th.recv_header(store_conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dict = {}
     ret_dict['Status'] = 'Modify successed.'
     ret_dict['Storage IP'] = store_serv.ip_addr
     return ret_dict
Example #5
 def _storage_do_truncate_file(self, tracker_client, store_serv,
                               truncated_filesize, appender_filename):
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
     appender_filename_len = len(appender_filename)
     th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
     try:
         th.send_header(store_conn)
         # truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
         #              -appender_filename(len)-|
         truncate_fmt = '!Q Q %ds' % appender_filename_len
         send_buffer = struct.pack(truncate_fmt, appender_filename_len,
                                   truncated_filesize, appender_filename)
         tcp_send_data(store_conn, send_buffer)
         th.recv_header(store_conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dict = {}
     ret_dict['Status'] = 'Truncate successed.'
     ret_dict['Storage IP'] = store_serv.ip_addr
     return ret_dict
 def _storage_do_append_file(self, tracker_client, store_serv, file_buffer, file_size, upload_type,
                             appended_filename):
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     appended_filename_len = len(appended_filename)
     th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appended_filename_len + file_size
     th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
     try:
         th.send_header(store_conn)
         # append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
         #             -filecontent(filesize)-|
         append_fmt = '!Q Q %ds' % appended_filename_len
         send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename)
         tcp_send_data(store_conn, send_buffer)
         if upload_type == FDFS_UPLOAD_BY_FILENAME:
             tcp_send_file(store_conn, file_buffer)
         elif upload_type == FDFS_UPLOAD_BY_BUFFER:
             tcp_send_data(store_conn, file_buffer)
         elif upload_type == FDFS_UPLOAD_BY_FILE:
             tcp_send_file_ex(store_conn, file_buffer)
         th.recv_header(store_conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dict = {}
     ret_dict['Status'] = 'Append file successed.'
     ret_dict['Appender file name'] = store_serv.group_name + __os_sep__.encode() + appended_filename
     ret_dict['Appended size'] = appromix(file_size)
     ret_dict['Storage IP'] = store_serv.ip_addr
     return ret_dict
def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
    '''
    Receive file from server, fragmented it while receiving and write to disk.
    arguments:
    @conn: connection
    @local_filename: string
    @file_size: int, remote file size
    @buffer_size: int, receive buffer size
    @Return int: file size if success else raise ConnectionError.
    '''
    total_file_size = 0
    flush_size = 0
    remain_bytes = file_size
    with open(local_filename, 'wb+') as f:
        while remain_bytes > 0:
            try:
                if remain_bytes >= buffer_size:
                    file_buffer, recv_size = tcp_recv_response(conn, buffer_size,
                                                               buffer_size)
                else:
                    file_buffer, recv_size = tcp_recv_response(conn, remain_bytes,
                                                               buffer_size)
                f.write(file_buffer)
                remain_bytes -= recv_size
                total_file_size += recv_size
                flush_size += recv_size
                if flush_size >= 4096:
                    f.flush()
                    flush_size = 0
            except ConnectionError as e:
                raise ConnectionError(
                    '[-] Error: while downloading file(%s).' % e.args)
            except IOError as e:
                raise DataError('[-] Error: while writing local file(%s).' %
                                e.args)
    return total_file_size
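tcp_recv_file leans on tcp_recv_response to pull an exact number of bytes off the socket. A minimal sketch of that contract, assuming the connection object exposes get_sock() as in the other examples, is shown below; the real helper may differ in buffering and error handling.

def tcp_recv_response_sketch(conn, bytes_size, buffer_size=4096):
    # Read exactly bytes_size bytes and return (data, len(data)),
    # mirroring how tcp_recv_response is used throughout these examples.
    chunks, remaining = [], bytes_size
    sock = conn.get_sock()
    while remaining > 0:
        chunk = sock.recv(min(buffer_size, remaining))
        if not chunk:
            raise ConnectionError('[-] Error: connection closed while receiving.')
        chunks.append(chunk)
        remaining -= len(chunk)
    data = b''.join(chunks)
    return data, len(data)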
Example #8
 def tracker_list_all_groups(self):
     conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
     try:
         th.send_header(conn)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
     except:
         raise
     finally:
         self.pool.release(conn)
     gi = Group_info()
     gi_fmt_size = gi.get_fmt_size()
     if recv_size % gi_fmt_size != 0:
         errmsg = '[-] Error: Response size mismatch, expect: %d, actual: %d' % (
             th.pkg_len, recv_size)
         raise ResponseError(errmsg)
     num_groups = recv_size // gi_fmt_size
     ret_dict = {}
     ret_dict['Groups count'] = num_groups
     gi_list = []
     i = 0
     while num_groups:
         gi.set_info(recv_buffer[i * gi_fmt_size:(i + 1) * gi_fmt_size])
         gi_list.append(gi)
         gi = Group_info()
         i += 1
         num_groups -= 1
     ret_dict['Groups'] = gi_list
     return ret_dict
Example #9
 def tracker_query_storage_stor_without_group(self):
     '''Query storage server for upload, without group name.
     Return: Storage_server object'''
     conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
     try:
         th.send_header(conn)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
         if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
             errmsg = '[-] Error: Tracker response length is invalid, '
             errmsg += 'expect: %d, actual: %d' % (
                 TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
             raise ResponseError(errmsg)
     except ConnectionError:
         raise
     finally:
         self.pool.release(conn)
     # recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
     recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN,
                                  IP_ADDRESS_SIZE - 1)
     store_serv = Storage_server()
     (group_name, ip_addr, store_serv.port,
      store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
     store_serv.group_name = group_name.strip(b'\x00')
     store_serv.ip_addr = ip_addr.strip(b'\x00')
     return store_serv
Example #10
def tcp_send_file_ex(conn, filename, buffer_size=4096):
    '''
    Send file to server. Using linux system call 'sendfile'.
    arguments:
    @conn: connection
    @filename: string
    @return long, sended size
    '''
    if 'linux' not in sys.platform.lower():
        raise DataError(
            '[-] Error: \'sendfile\' system call only available on linux.')
    nbytes = 0
    offset = 0
    sock_fd = conn.get_sock().fileno()
    with open(filename, 'rb') as f:
        in_fd = f.fileno()
        while 1:
            try:
                # Zero-copy send; os.sendfile is assumed here as the Linux
                # system-call wrapper (the original code may use the
                # third-party 'sendfile' package instead).
                sent = os.sendfile(sock_fd, in_fd, offset, buffer_size)
                if 0 == sent:
                    break
                nbytes += sent
                offset += sent
            except OSError as e:
                if e.errno == errno.EAGAIN:
                    continue
                raise
    return nbytes
Example #11
def tcp_send_file(conn, filename, buffer_size=1024):
    '''
    Send file to server, and split into multiple pkgs while sending.
    arguments:
    @conn: connection
    @filename: string
    @buffer_size: int ,send buffer size
    @Return int: file size if success else raise ConnectionError.
    '''
    file_size = 0
    with open(filename, 'rb') as f:
        while 1:
            try:
                send_buffer = f.read(buffer_size)
                send_size = len(send_buffer)
                if send_size == 0:
                    break
                tcp_send_data(conn, send_buffer)
                file_size += send_size
            except ConnectionError as e:
                raise ConnectionError('[-] Error while uploading file(%s).' %
                                      e.args)
            except IOError as e:
                raise DataError('[-] Error while reading local file(%s).' %
                                e.args)
    return file_size
Example #12
 def set_info(self, bytes_stream):
     (group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
      self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
      self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
     try:
         self.group_name = group_name.strip(b'\x00')
         self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
         self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
         self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
     except ValueError:
         raise DataError('[-] Error: disk space overrun, cannot represent it.')
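set_info runs the raw MB counters through appromix with FDFS_SPACE_SIZE_BASE_INDEX to get human-readable sizes, and treats a ValueError as "too large to represent". A plausible sketch of such a formatter, assuming base_index picks the starting unit, follows; the library's real helper may differ.

def appromix_sketch(size, base_index=0):
    # Hedged stand-in for appromix: format a raw size as a human-readable
    # string, starting the unit ladder at base_index (e.g. MB for the
    # tracker's MB counters).
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    if size < 0:
        raise ValueError('[-] Error: size must be non-negative.')
    value, unit = float(size), base_index
    while value >= 1024:
        if unit >= len(units) - 1:
            # Mirrors the ValueError the caller above guards against.
            raise ValueError('[-] Error: size too large to represent.')
        value /= 1024.0
        unit += 1
    return '%.2f%s' % (value, units[unit])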
Example #13
 def query_file_info(self, group_name: bytes, remote_file_id: bytes):
     store_conn = self.pool.get_connection()
     body = struct.pack('!16s %ds' % len(remote_file_id), group_name, remote_file_id)
     # header: pkg_len(8) + cmd(1) + status(1); 22 is STORAGE_PROTO_CMD_QUERY_FILE_INFO
     header = struct.pack('!QBB', len(body), 22, 0)
     store_conn.get_sock().send(header)
     store_conn.get_sock().send(body)
     ret = store_conn.get_sock().recv(1024)
     ret_len, ret_cmd, ret_status = struct.unpack('!QBB', ret[:10])
     if ret_status != 0:
         raise DataError('[-] Error: %d, %s' % (ret_status, os.strerror(ret_status)))
     file_size, create_timestamp, crc32, source_ip_addr = struct.unpack('!QQQ16s', ret[10:])
     return file_size, create_timestamp, crc32, source_ip_addr
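Example #13 builds the 10-byte FastDFS header by hand with struct.pack('!QBB', ...): pkg_len(8), cmd(1), status(1), big-endian. Tracker_header in the other examples wraps the same framing; the sketch below shows it in isolation, with illustrative names.

import struct

FDFS_HEADER_FMT = '!QBB'          # pkg_len(8) + cmd(1) + status(1)
FDFS_HEADER_LEN = struct.calcsize(FDFS_HEADER_FMT)  # 10 bytes

def pack_header_sketch(pkg_len, cmd, status=0):
    return struct.pack(FDFS_HEADER_FMT, pkg_len, cmd, status)

def unpack_header_sketch(raw):
    # Returns (pkg_len, cmd, status) from the first 10 bytes of a response.
    return struct.unpack(FDFS_HEADER_FMT, raw[:FDFS_HEADER_LEN])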
Example #14
 def _storage_do_download_file(self, tracker_client, store_serv,
                               file_buffer, offset, download_size,
                               download_type, remote_filename):
     '''
     Core of download file from storage server.
     You can choose the download type, either FDFS_DOWNLOAD_TO_FILE or
     FDFS_DOWNLOAD_TO_BUFFER, and you can specify a file offset.
     @Return dictionary
         'Remote file_id'  : remote_file_id,
         'Content'         : local_filename or buffer,
         'Download size'   : download_size,
         'Storage IP'      : storage_ip
     '''
     store_conn = self.pool.get_connection()
     th = Tracker_header()
     remote_filename_len = len(remote_filename)
     th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
     th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
     try:
         th.send_header(store_conn)
         # down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
         down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN,
                                      remote_filename_len)
         send_buffer = struct.pack(down_fmt, offset, download_size,
                                   store_serv.group_name, remote_filename)
         tcp_send_data(store_conn, send_buffer)
         th.recv_header(store_conn)
         # if th.status == 2:
         #    raise DataError('[-] Error: remote file %s is not exist.' %
         #                    (store_serv.group_name + __os_sep__.encode() + remote_filename))
         if th.status != 0:
             raise DataError('Error: %d %s' %
                             (th.status, os.strerror(th.status)))
         if download_type == FDFS_DOWNLOAD_TO_FILE:
             total_recv_size = tcp_recv_file(store_conn, file_buffer,
                                             th.pkg_len)
         elif download_type == FDFS_DOWNLOAD_TO_BUFFER:
             recv_buffer, total_recv_size = tcp_recv_response(
                 store_conn, th.pkg_len)
     except:
         raise
     finally:
         self.pool.release(store_conn)
     ret_dic = {
         'Remote file_id':
         store_serv.group_name + __os_sep__.encode() + remote_filename,
         'Content': file_buffer
         if download_type == FDFS_DOWNLOAD_TO_FILE else recv_buffer,
         'Download size': appromix(total_recv_size),
         'Storage IP': store_serv.ip_addr
     }
     return ret_dic
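For orientation, a typical download through the public client wrapper might look like the snippet below; the import path, class and method names (Fdfs_client, get_tracker_conf, download_to_file) are assumptions about the wrapper built on top of this core routine and may differ between forks.

# Hypothetical usage; names and the bytes file_id convention are assumptions.
from fdfs_client.client import Fdfs_client, get_tracker_conf

client = Fdfs_client(get_tracker_conf('/etc/fdfs/client.conf'))
ret = client.download_to_file('local_copy.jpg',
                              b'group1/M00/00/00/example_remote_name.jpg')
print(ret['Download size'], ret['Storage IP'])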
 def tracker_list_servers(self, group_name, storage_ip=None):
     '''
     List servers in a storage group
     '''
     conn = self.pool.get_connection()
     th = Tracker_header()
     ip_len = len(storage_ip) if storage_ip else 0
     if ip_len >= IP_ADDRESS_SIZE:
         ip_len = IP_ADDRESS_SIZE - 1
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
     th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
     group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
     store_ip_addr = storage_ip or b''
     if isinstance(store_ip_addr, str):
         store_ip_addr = store_ip_addr.encode()
     storage_ip_fmt = '!%ds' % ip_len
     try:
         th.send_header(conn)
         send_buffer = struct.pack(group_fmt, group_name) + \
                         struct.pack(storage_ip_fmt, store_ip_addr)
         tcp_send_data(conn, send_buffer)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
         si = Storage_info()
         si_fmt_size = si.get_fmt_size()
         recv_size = len(recv_buffer)
         if recv_size % si_fmt_size != 0:
             errinfo = '[-] Error: response size does not match, expect: %d, actual: %d' \
                                % (th.pkg_len, recv_size)
             raise ResponseError(errinfo)
     except ConnectionError:
         conn.disconnect()
         raise
     finally:
         self.pool.release(conn)
     num_storage = recv_size // si_fmt_size
     si_list = []
     i = 0
     while num_storage:
         si.set_info(recv_buffer[(i * si_fmt_size):((i + 1) * si_fmt_size)])
         si_list.append(si)
         si = Storage_info()
         num_storage -= 1
         i += 1
     ret_dict = {}
     ret_dict['Group name'] = group_name
     ret_dict['Servers'] = si_list
     return ret_dict
Example #16
 def _tracker_do_query_storage(self, group_name, filename, cmd):
     '''
     Core of querying a storage server, based on group name and filename.
     It is used for download, delete and set metadata operations.
     arguments:
     @group_name: string
     @filename: string, remote filename
     @Return: Storage_server object
     '''
     conn = self.pool.get_connection()
     th = Tracker_header()
     file_name_len = len(filename)
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
     th.cmd = cmd
     th.send_header(conn)
     # query_fmt: |-group_name(16)-filename(file_name_len)-|
     query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
     send_buffer = struct.pack(query_fmt, group_name.encode(),
                               filename.encode())
     try:
         tcp_send_data(conn, send_buffer)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
         if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
             errmsg = '[-] Error: Tracker response length is invalid, '
             errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
             raise ResponseError(errmsg)
     except ConnectionError:
         raise
     finally:
         self.pool.release(conn)
     # recv_fmt: |-group_name(16)-ip_addr(16-1)-port(8)-|
     recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN,
                                IP_ADDRESS_SIZE - 1)
     store_serv = Storage_server()
     (group_name, ipaddr,
      store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
     store_serv.group_name = group_name.strip(b'\x00')
     store_serv.ip_addr = ipaddr.strip(b'\x00')
     return store_serv
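Every storage operation in these examples follows the same two-step pattern: query the tracker for the right storage server, then talk to that server directly. Below is a conceptual sketch of a delete built from the methods shown above; the command constant and the split between tracker and storage client objects are assumptions.

def delete_by_file_id_sketch(tracker_client, storage_client, group_name,
                             remote_filename):
    # 1. Ask the tracker which storage server currently serves this file.
    store_serv = tracker_client._tracker_do_query_storage(
        group_name, remote_filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
    # 2. Issue the delete against that storage server (see Example #3).
    return storage_client.storage_delete_file(tracker_client, store_serv,
                                              remote_filename.encode())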
Example #17
 def tracker_list_one_group(self, group_name):
     conn = self.pool.get_connection()
     th = Tracker_header()
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
     th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
     #group_fmt: |-group_name(16)-|
     group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
     try:
         th.send_header(conn)
         send_buffer = struct.pack(group_fmt, group_name)
         tcp_send_data(conn, send_buffer)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
         group_info = Group_info()
         group_info.set_info(recv_buffer)
     except ConnectionError:
         raise
     finally:
         self.pool.release(conn)
     return group_info   
 def tracker_query_storage_stor_with_group(self, group_name):
     '''Query storage server for upload, based group name.
     arguments:
     @group_name: string
     @Return Storage_server object
     '''
     conn = self.pool.get_connection()
     th = Tracker_header()
     th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
     th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
     th.send_header(conn)
     group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
     send_buffer = struct.pack(group_fmt, group_name.encode())
     try:
         tcp_send_data(conn, send_buffer)
         th.recv_header(conn)
         if th.status != 0:
             raise DataError('Error: %d, %s' %
                             (th.status, os.strerror(th.status)))
         recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
         if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
             errmsg = '[-] Error: Tracker response length is invalid, '
             errmsg += 'expect: %d, actual: %d' \
                         % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
             raise ResponseError(errmsg)
     except ConnectionError:
         raise
     finally:
         self.pool.release(conn)
     #recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
     recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN,
                                  IP_ADDRESS_SIZE - 1)
     store_serv = Storage_server()
     (group, ip_addr, \
      store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
     store_serv.group_name = bytes.decode(group).strip('\x00')
     store_serv.ip_addr = bytes.decode(ip_addr).strip('\x00')
     return store_serv
Example #19
    def _storage_do_upload_file(self,
                                tracker_client,
                                store_serv,
                                file_buffer,
                                file_size=None,
                                upload_type=None,
                                meta_dict=None,
                                cmd=None,
                                master_filename=None,
                                prefix_name=None,
                                file_ext_name=None):
        '''
        Core of uploading a file.
        arguments:
        @tracker_client: Tracker_client, used to connect to the tracker server
        @store_serv: Storage_server, returned from querying the tracker server
        @file_buffer: string, file name or file buffer to send
        @file_size: int
        @upload_type: int, optional: FDFS_UPLOAD_BY_FILE, FDFS_UPLOAD_BY_FILENAME,
                                     FDFS_UPLOAD_BY_BUFFER
        @meta_dict: dictionary, metadata to store with the file
        @cmd: int, refer to the fdfs protocol
        @master_filename: string, used when uploading a slave file
        @prefix_name: string
        @file_ext_name: string
        @Return dictionary 
                 {
                     'Group name'      : group_name,
                     'Remote file_id'  : remote_file_id,
                     'Status'          : status,
                     'Local file name' : local_filename,
                     'Uploaded size'   : upload_size,
                     'Storage IP'      : storage_ip
                 }

        '''

        store_conn = self.pool.get_connection()
        th = Tracker_header()
        master_filename_len = len(master_filename) if master_filename else 0
        prefix_name_len = len(prefix_name) if prefix_name else 0
        upload_slave = len(store_serv.group_name) and master_filename_len
        file_ext_name = str(file_ext_name) if file_ext_name else ''
        # non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
        non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
        # slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
        #           -master_name(master_filename_len)-|
        slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN,
                                          FDFS_FILE_EXT_NAME_MAX_LEN,
                                          master_filename_len)
        th.pkg_len = struct.calcsize(
            slave_fmt) if upload_slave else struct.calcsize(non_slave_fmt)
        th.pkg_len += file_size
        th.cmd = cmd
        th.send_header(store_conn)
        if upload_slave:
            send_buffer = struct.pack(slave_fmt, master_filename_len,
                                      file_size, prefix_name,
                                      file_ext_name.encode(), master_filename)
        else:
            send_buffer = struct.pack(non_slave_fmt,
                                      store_serv.store_path_index, file_size,
                                      file_ext_name.encode())
        try:
            tcp_send_data(store_conn, send_buffer)
            if upload_type == FDFS_UPLOAD_BY_FILENAME:
                send_file_size = tcp_send_file(store_conn, file_buffer)
            elif upload_type == FDFS_UPLOAD_BY_BUFFER:
                tcp_send_data(store_conn, file_buffer)
            elif upload_type == FDFS_UPLOAD_BY_FILE:
                send_file_size = tcp_send_file_ex(store_conn, file_buffer)
            th.recv_header(store_conn)
            if th.status != 0:
                raise DataError('[-] Error: %d, %s' %
                                (th.status, os.strerror(th.status)))
            recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
            if recv_size <= FDFS_GROUP_NAME_MAX_LEN:
                errmsg = '[-] Error: Storage response length is not match, '
                errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
                raise ResponseError(errmsg)
            # recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
            recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN,
                                     th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
            (group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
            remote_filename = remote_name.strip(b'\x00')
            if meta_dict and len(meta_dict) > 0:
                status = self.storage_set_metadata(tracker_client, store_serv,
                                                   remote_filename, meta_dict)
                if status != 0:
                    # rollback
                    self.storage_delete_file(tracker_client, store_serv,
                                             remote_filename)
                    raise DataError('[-] Error: %d, %s' %
                                    (status, os.strerror(status)))
        except:
            raise
        finally:
            self.pool.release(store_conn)
        ret_dic = {
            'Group name':
            group_name.strip(b'\x00'),
            'Remote file_id':
            group_name.strip(b'\x00') + __os_sep__.encode() + remote_filename,
            'Status':
            'Upload successed.',
            'Local file name':
            file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME
                            or upload_type == FDFS_UPLOAD_BY_FILE) else '',
            'Uploaded size':
            appromix(send_file_size) if
            (upload_type == FDFS_UPLOAD_BY_FILENAME
             or upload_type == FDFS_UPLOAD_BY_FILE) else appromix(
                 len(file_buffer)),
            'Storage IP':
            store_serv.ip_addr
        }
        return ret_dic
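And a minimal end-to-end upload through the public wrapper, under the same assumptions about the wrapper API (Fdfs_client, get_tracker_conf, upload_by_filename) as in the download snippet above; the dictionary keys match the return value of this core routine.

# Hypothetical usage; the wrapper names and meta_dict keyword are assumptions.
from fdfs_client.client import Fdfs_client, get_tracker_conf

client = Fdfs_client(get_tracker_conf('/etc/fdfs/client.conf'))
ret = client.upload_by_filename('/tmp/report.pdf',
                                meta_dict={'author': 'alice', 'ext': 'pdf'})
print(ret['Remote file_id'], ret['Uploaded size'], ret['Storage IP'])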