I have 3 scripts:
When I try to send multiple API requests at the same time, the part of scriptB that sends packetA works fine, and the multiple packetAs go out almost simultaneously. However, only one packetB ever gets sniffed. It looks as though the sniffers, or their timers, share some resource that only lets one of them work. I've tried the standard sniff() as well as AsyncSniffer(); it doesn't seem to matter. I've tried invoking scriptB as a separate subprocess to avoid any shared resources; that doesn't seem to matter either. I've also run several shells at once with identical/similar AsyncSniffer instances, and those don't appear to step on each other.
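For concreteness, this is roughly how the concurrent requests get fired; a minimal illustrative client, not my exact harness (the port and the val2 payloads here are placeholders):

import json
from concurrent.futures import ThreadPoolExecutor
from urllib.request import Request, urlopen

def post_once(val2):
    body = json.dumps({"val2": val2}).encode("utf8")
    req = Request("http://localhost:8899/sendrequest", data=body,
                  headers={"Content-Type": "application/json"})
    with urlopen(req, timeout=15) as resp:
        return resp.read().decode("utf8")

# fire several POSTs at /sendrequest at once
with ThreadPoolExecutor(max_workers=5) as pool:
    for result in pool.map(post_once, ["a", "b", "c", "d", "e"]):
        print(result)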
I'm completely stumped. Any ideas?
Here's my code:
scriptC:
import tornado.ioloop
import tornado.process
import tornado.web
from tornado import gen
import json
import subprocess
import time

import scriptA


def checkAr(Ar, keyname, defaultval=None, strict=False):
    # Return Ar[keyname] if present; otherwise return the default.
    # In strict mode with no default, a missing key is an error.
    if keyname in Ar:
        return Ar[keyname]
    if strict and defaultval is None:
        raise KeyError(keyname)
    return defaultval


def incr_Arrval(Arr):
    # In-place increment of the first element; keeping the value in a
    # one-element list allows it to be mutated globally.
    Arr[0] = Arr[0] + 1
    return Arr


class send_my_request(tornado.web.RequestHandler):
    @gen.coroutine
    def post(self):
        process_running = True
        error_message = ''
        try:
            if "{" in self.request.body.decode('utf8'):
                myargs = json.loads(self.request.body.decode('utf8').replace("\'", "\""))
                val2 = str(checkAr(myargs, "val2", None, True))
            else:
                # e.g. application/x-www-form-urlencoded
                val2 = str(self.get_argument("val2"))
        except (tornado.web.MissingArgumentError, ValueError, KeyError) as e:
            error_message = f"Invalid or missing argument: {str(e)}"
            self.set_status(400)
            process_running = False
        if process_running:
            # myVal is the one-element list set up in __main__, so pass its first item
            command = "python3 scriptB.py --action=send --myVal=" + myVal[0] + " --val2=" + val2
            json_data = yield self.run_command(command)
            self.set_header("Content-Type", "application/json")
            self.write(json_data)
        else:
            if error_message == '':
                error_message = "error connection is timed out or not established"
            self.set_status(400)
            self.write({"error": error_message})

    @gen.coroutine
    def run_command(self, command):
        process = tornado.process.Subprocess(command, stdout=subprocess.PIPE, shell=True)
        yield process.wait_for_exit(raise_error=False)
        output = process.stdout.read()
        json_str = output.decode('utf-8')  # avoid shadowing the json module
        print("this is the output : " + json_str)
        raise gen.Return(json_str)


def make_app():
    return tornado.web.Application([(r"/sendrequest", send_my_request)], debug=True, autoreload=True)


if __name__ == "__main__":
    myVal = ['']
    scriptA.startup(myVal)
    time.sleep(2)
    app = make_app()
    port = 8899  # must be a valid TCP port (0-65535)
    print("ready for request")
    app.listen(port)
    # print(f"Server is listening on localhost on port {port}")
    tornado.ioloop.IOLoop.current().start()
The relevant part of scriptB (LayerType1 and LayerType2 are built into scapy):
def get_id_and_pkt(pkt, myId, ids):
    # Walk every LayerType1 in the packet; if one carries our id, stash its
    # payload in ids[myId] and accept the packet.
    try:
        current = pkt[LayerType1]
        while current:
            a = MyLayerR(current.data)[LayerType2].layerid
            if str(a) == myId:
                ids[myId] = current.data
                return True
            # some packets have many LayerType1s, so check them all by moving
            # which one is current
            current = current.getlayer(LayerType1, 2)
    except Exception:
        pass
    return False  # no LayerType1 carried our id


def send_and_read(my_built_packet, destip, val2):
    myId = my_built_packet[LayerType1].data[LayerType2].layerid
    sniffer = None
    try:
        ids[myId] = ''
    except NameError:
        # ids is effectively created fresh per call here; the lfilter
        # lambda below closes over this local dict
        ids = {}
        ids[myId] = ''
    # run the sniffer until the id is myId or timeout; on a match,
    # get_id_and_pkt stores the payload in ids[myId]
    sniffer = AsyncSniffer(filter='src host ' + destip,
                           lfilter=lambda x: LayerType1 in x and get_id_and_pkt(x, myId, ids),
                           store=0, timeout=10, count=1)
    sniffer.start()
    while not sniffer.running:
        # shouldn't happen, but keeps send() from racing ahead of the sniffer
        print("haven't started yet")
        time.sleep(0.02)
    send(my_built_packet, verbose=0)
    while sniffer.running:
        time.sleep(0.01)
    if len(ids[myId]) == 0:
        data = {"errorCode": '9999'}
        del ids[myId]
        if sniffer.running:
            sniffer.stop()
        sniffer = None
    else:
        # read response
        data = read_response(ids.pop(myId))
        if sniffer.running:  # should not be
            sniffer.stop()
        sniffer = None
    json_data = json.dumps(data)
    return json_data
I'd be happy to find a different answer, but I suspect the best one is a single AsyncSniffer with no timeout that stores every potentially matching packet in a global dict, ids. Then, after sending, I would poll ids for up to 10 seconds for an entry whose key == myId. That trades memory, and I'd need a separate function to purge old entries from ids, but it should sidestep any shared-resource problem, and it might actually be faster. A rough sketch of what I mean is below.
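This sketch assumes scriptB's existing scapy imports and the same LayerType1/LayerType2/MyLayerR names as above; collect_ids, wait_for_id, and purge_old are hypothetical names, not existing code:

import time

ids = {}  # layerid -> (timestamp, payload), shared by all requests

def collect_ids(pkt):
    # prn callback for one long-lived AsyncSniffer with no timeout:
    # record every candidate payload keyed by its layerid
    try:
        current = pkt[LayerType1]
        while current:
            layerid = str(MyLayerR(current.data)[LayerType2].layerid)
            ids[layerid] = (time.time(), current.data)
            current = current.getlayer(LayerType1, 2)
    except Exception:
        pass

sniffer = AsyncSniffer(lfilter=lambda x: LayerType1 in x, prn=collect_ids, store=0)
sniffer.start()

def wait_for_id(myId, timeout=10):
    # after send(), poll the shared dict instead of starting a new sniffer
    deadline = time.time() + timeout
    while time.time() < deadline:
        if myId in ids:
            return ids.pop(myId)[1]
        time.sleep(0.01)
    return None

def purge_old(max_age=30):
    # separate housekeeping pass to drop stale entries (the memory trade-off)
    cutoff = time.time() - max_age
    for key in [k for k, (ts, _) in ids.items() if ts < cutoff]:
        del ids[key]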