mavlogdump.py 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309
  1. #!/usr/bin/env python
  2. '''
  3. example program that dumps a Mavlink log file. The log file is
  4. assumed to be in the format that qgroundcontrol uses, which consists
  5. of a series of MAVLink packets, each with a 64 bit timestamp
  6. header. The timestamp is in microseconds since 1970 (unix epoch)
  7. '''
  8. from __future__ import print_function
  9. import array
  10. import fnmatch
  11. import json
  12. import os
  13. import struct
  14. import sys
  15. import time
  16. try:
  17. from pymavlink.mavextra import *
  18. except:
  19. print("WARNING: Numpy missing, mathematical notation will not be supported..")
  20. from argparse import ArgumentParser
# Build the command-line interface; the module docstring doubles as --help text.
parser = ArgumentParser(description=__doc__)

parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_argument("-f", "--follow", action='store_true', help="keep waiting for more data at end of file")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("-q", "--quiet", action='store_true', help="don't display packets")
parser.add_argument("-o", "--output", default=None, help="output matching packets to give file")
parser.add_argument("-p", "--parms", action='store_true', help="preserve parameters in output with -o")
parser.add_argument("--format", default=None, help="Change the output format between 'standard', 'json', and 'csv'. For the CSV output, you must supply types that you want.")
parser.add_argument("--csv_sep", dest="csv_sep", default=",", help="Select the delimiter between columns for the output CSV file. Use 'tab' to specify tabs. Only applies when --format=csv")
parser.add_argument("--types", default=None, help="types of messages (comma separated with wildcard)")
parser.add_argument("--nottypes", default=None, help="types of messages not to include (comma separated with wildcard)")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--no-bad-data", action='store_true', help="Don't output corrupted messages")
parser.add_argument("--show-source", action='store_true', help="Show source system ID and component ID")
parser.add_argument("--show-seq", action='store_true', help="Show sequence numbers")
parser.add_argument("--show-types", action='store_true', help="Shows all message types available on opened log")
parser.add_argument("--source-system", type=int, default=None, help="filter by source system ID")
parser.add_argument("--source-component", type=int, default=None, help="filter by source component ID")
parser.add_argument("--link", type=int, default=None, help="filter by comms link ID")
parser.add_argument("--verbose", action='store_true', help="Dump messages in a much more verbose (but non-parseable) format")
parser.add_argument("--mav10", action='store_true', help="parse as MAVLink1")
parser.add_argument("--reduce", type=int, default=0, help="reduce streaming messages")
# Positional: path to the log file (.tlog/.bin/.log/...).
parser.add_argument("log", metavar="LOG")

args = parser.parse_args()
# Default to MAVLink 2 unless the user explicitly asked for MAVLink 1.
# The environment variable must be set BEFORE pymavlink.mavutil is imported.
if not args.mav10:
    os.environ['MAVLINK20'] = '1'

import inspect

from pymavlink import mavutil

filename = args.log
# mavlink_connection auto-detects the log kind from the filename/extension.
mlog = mavutil.mavlink_connection(filename, planner_format=args.planner,
                                  notimestamps=args.notimestamps,
                                  robust_parsing=args.robust,
                                  dialect=args.dialect,
                                  zero_time_base=args.zero_time_base)

# Optional binary file that matching packets are copied into (-o/--output).
output = None
if args.output:
    output = open(args.output, mode='wb')

# --types / --nottypes are comma-separated lists of glob patterns.
types = args.types
if types is not None:
    types = types.split(',')

nottypes = args.nottypes
if nottypes is not None:
    nottypes = nottypes.split(',')

# Classify the log flavour by file extension.
ext = os.path.splitext(filename)[1]
isbin = ext in ['.bin', '.BIN', '.px4log']
islog = ext in ['.log', '.LOG'] # NOTE: "islog" does not mean a tlog
istlog = ext in ['.tlog', '.TLOG']
  71. # list of msgs to reduce in rate when --reduce is used
  72. reduction_msgs = ['NKF*', 'XKF*', 'IMU*', 'AHR2', 'BAR*', 'ATT', 'BAT*', 'CTUN', 'NTUN', 'GP*', 'IMT*', 'MAG*', 'PL', 'POS', 'POW*', 'RATE', 'RC*', 'RFND', 'UBX*', 'VIBE', 'NKQ*', 'MOT*', 'CTRL', 'FTS*', 'DSF', 'CST*', 'LOS*', 'UWB*']
  73. reduction_yes = set()
  74. reduction_no = set()
  75. reduction_count = {}
  76. def reduce_msg(mtype, reduction_ratio):
  77. '''return True if this msg should be discarded by reduction'''
  78. global reduction_count, reduction_msgs, reduction_yes, reduction_no
  79. if mtype in reduction_no:
  80. return False
  81. if not mtype in reduction_yes:
  82. for m in reduction_msgs:
  83. if fnmatch.fnmatch(mtype, m):
  84. reduction_yes.add(mtype)
  85. reduction_count[mtype] = 0
  86. break
  87. if not mtype in reduction_yes:
  88. reduction_no.add(mtype)
  89. return False
  90. reduction_count[mtype] += 1
  91. if reduction_count[mtype] == reduction_ratio:
  92. reduction_count[mtype] = 0
  93. return False
  94. return True
# Accept the literal word "tab" on the command line, since passing a real
# tab character through a shell is awkward.
if args.csv_sep == "tab":
    args.csv_sep = "\t"
  97. def match_type(mtype, patterns):
  98. '''return True if mtype matches pattern'''
  99. for p in patterns:
  100. if fnmatch.fnmatch(mtype, p):
  101. return True
  102. return False
  103. # Write out a header row as we're outputting in CSV format.
  104. fields = ['timestamp']
  105. offsets = {}
  106. if istlog and args.format == 'csv': # we know our fields from the get-go
  107. try:
  108. currentOffset = 1 # Store how many fields in we are for each message.
  109. for type in types:
  110. try:
  111. typeClass = "MAVLink_{0}_message".format(type.lower())
  112. fields += [type + '.' + x for x in inspect.getargspec(getattr(mavutil.mavlink, typeClass).__init__).args[1:]]
  113. offsets[type] = currentOffset
  114. currentOffset += len(fields)
  115. except IndexError:
  116. quit()
  117. except TypeError:
  118. print("You must specify a list of message types if outputting CSV format via the --types argument.")
  119. exit()
  120. # The first line output are names for all columns
  121. csv_out = ["" for x in fields]
  122. print(args.csv_sep.join(fields))
  123. if isbin and args.format == 'csv': # need to accumulate columns from message
  124. if types is None or len(types) != 1:
  125. print("Need exactly one type when dumping CSV from bin file")
  126. quit()
  127. # Track the last timestamp value. Used for compressing data for the CSV output format.
  128. last_timestamp = None
  129. # Track types found
  130. available_types = set()
  131. # for DF logs pre-calculate types list
  132. match_types=None
  133. if types is not None and hasattr(mlog, 'name_to_id'):
  134. for k in mlog.name_to_id.keys():
  135. if match_type(k, types):
  136. if nottypes is not None and match_type(k, nottypes):
  137. continue
  138. if match_types is None:
  139. match_types = []
  140. match_types.append(k)
  141. if isbin and args.format == 'csv':
  142. # we need FMT messages for column headings
  143. match_types.append("FMT")
# Keep track of data from the current timestep. If the following timestep has
# the same data, it's stored in here as well. Output should therefore have
# entirely unique timesteps.
while True:
    # Blocking read only in --follow mode; match_types (possibly None) is the
    # pre-computed DF-log type filter.
    m = mlog.recv_match(blocking=args.follow, type=match_types)
    if m is None:
        # FIXME: Make sure to output the last CSV message before dropping out of this loop
        break
    available_types.add(m.get_type())
    # DF-log CSV mode: column names only become known once the FMT record for
    # the requested type arrives; print the header at that point.
    if isbin and m.get_type() == "FMT" and args.format == 'csv':
        if m.Name == types[0]:
            fields += m.Columns.split(',')
            csv_out = ["" for x in fields]
            print(args.csv_sep.join(fields))
    # Rate reduction (--reduce N): drop all but every Nth streaming message.
    if args.reduce and reduce_msg(m.get_type(), args.reduce):
        continue
    if output is not None:
        # FMT and (optionally) PARM records are copied to the output file
        # unconditionally, before any filtering, so the output stays parseable.
        if (isbin or islog) and m.get_type() == "FMT":
            output.write(m.get_msgbuf())
            continue
        if (isbin or islog) and (m.get_type() == "PARM" and args.parms):
            output.write(m.get_msgbuf())
            continue
        if m.get_type() == 'PARAM_VALUE' and args.parms:
            timestamp = getattr(m, '_timestamp', None)
            # tlog framing: big-endian 64-bit microsecond timestamp + raw packet.
            output.write(struct.pack('>Q', int(timestamp*1.0e6)) + m.get_msgbuf())
            continue
    # --condition expression evaluated against the latest message of each type.
    if not mavutil.evaluate_condition(args.condition, mlog.messages):
        continue
    # Source / link filters.
    if args.source_system is not None and args.source_system != m.get_srcSystem():
        continue
    if args.source_component is not None and args.source_component != m.get_srcComponent():
        continue
    if args.link is not None and args.link != m._link:
        continue
    # Type include/exclude filters (BAD_DATA bypasses the include filter so
    # corruption is still visible unless explicitly suppressed below).
    if types is not None and m.get_type() != 'BAD_DATA' and not match_type(m.get_type(), types):
        continue
    if nottypes is not None and match_type(m.get_type(), nottypes):
        continue
    # Ignore BAD_DATA messages if the user requested or if they're because of a bad prefix. The
    # latter case is normally because of a mismatched MAVLink version.
    if m.get_type() == 'BAD_DATA' and (args.no_bad_data is True or m.reason == "Bad prefix"):
        continue
    # Grab the timestamp.
    timestamp = getattr(m, '_timestamp', 0.0)
    # If we're just logging, pack in the timestamp and data into the output file.
    if output:
        if not (isbin or islog):
            # tlog output needs the 64-bit microsecond timestamp header.
            output.write(struct.pack('>Q', int(timestamp*1.0e6)))
        try:
            output.write(m.get_msgbuf())
        except Exception as ex:
            print("Failed to write msg %s: %s" % (m.get_type(), str(ex)))
    # If quiet is specified, don't display output to the terminal.
    if args.quiet:
        continue
    # If JSON was ordered, serve it up. Split it nicely into metadata and data.
    if args.format == 'json':
        # Format our message as a Python dict, which gets us almost to proper JSON format
        data = m.to_dict()
        # Remove the mavpackettype value as we specify that later.
        del data['mavpackettype']
        # Also, if it's a BAD_DATA message, make it JSON-compatible by removing array objects
        if 'data' in data and type(data['data']) is not dict:
            data['data'] = list(data['data'])
        # Prepare the message as a single object with 'meta' and 'data' keys holding
        # the message's metadata and actual data respectively.
        meta = {"type": m.get_type(), "timestamp": timestamp}
        if args.show_source:
            meta["srcSystem"] = m.get_srcSystem()
            meta["srcComponent"] = m.get_srcComponent()
        # convert any array.array (e.g. packed-16-bit fft readings) into lists:
        for key in data.keys():
            if type(data[key]) == array.array:
                data[key] = list(data[key])
        outMsg = {"meta": meta, "data": data}
        # Now print out this object with stringified properly.
        print(json.dumps(outMsg))
    # CSV format outputs columnar data with a user-specified delimiter
    elif args.format == 'csv':
        data = m.to_dict()
        # NOTE(review): this rebinds the builtin `type` for the rest of the
        # loop body; the JSON branch above still sees the real builtin.
        type = m.get_type()
        # If this message has a duplicate timestamp, copy its data into the existing data list. Also
        # do this if it's the first message encountered.
        if timestamp == last_timestamp or last_timestamp is None:
            if isbin:
                newData = [str(data[y]) if y != "timestamp" else "" for y in fields]
            else:
                newData = [str(data[y.split('.')[-1]]) if y.split('.')[0] == type and y.split('.')[-1] in data else "" for y in fields]
            for i, val in enumerate(newData):
                if val:
                    csv_out[i] = val
        # Otherwise if this is a new timestamp, print out the old output data, and store the current message for later output.
        else:
            csv_out[0] = "{:.8f}".format(last_timestamp)
            print(args.csv_sep.join(csv_out))
            if isbin:
                csv_out = [str(data[y]) if y != "timestamp" else "" for y in fields]
            else:
                csv_out = [str(data[y.split('.')[-1]]) if y.split('.')[0] == type and y.split('.')[-1] in data else "" for y in fields]
    elif args.show_types:
        # do nothing; types are collected in available_types and printed at exit
        pass
    elif args.verbose and istlog:
        mavutil.dump_message_verbose(sys.stdout, m)
        print("")
    else:
        # Otherwise we output in a standard Python dict-style format
        s = "%s.%02u: %s" % (time.strftime("%Y-%m-%d %H:%M:%S",
                                           time.localtime(timestamp)),
                             int(timestamp*100.0)%100, m)
        if args.show_source:
            s += " srcSystem=%u srcComponent=%u" % (m.get_srcSystem(), m.get_srcComponent())
        if args.show_seq:
            s += " seq=%u" % m.get_seq()
        print(s)
    # Update our last timestamp value.
    last_timestamp = timestamp
  260. if args.show_types:
  261. for msgType in available_types:
  262. print(msgType)