Here is a Python script that parses a .har (HTTP Archive) file and dumps the relevant information into a CSV file.
import json
import csv
import os
from urllib.parse import urlparse
import mimetypes


def parse_har_to_csv(har_file_path, output_csv_path):
    """Parse a HAR (HTTP Archive) file and write one CSV row per request entry.

    Columns: url, method, status, mimeType, file_extension, size_kb,
    time_ms, initiator.

    Args:
        har_file_path: Path to the .har file (JSON, UTF-8 encoded).
        output_csv_path: Destination CSV path (overwritten if it exists).

    Raises:
        FileNotFoundError: If the HAR file does not exist.
        json.JSONDecodeError: If the HAR file is not valid JSON.
        KeyError: If the HAR lacks the standard log/entries structure.
    """
    with open(har_file_path, 'r', encoding='utf-8') as f:
        har_data = json.load(f)

    entries = har_data['log']['entries']

    with open(output_csv_path, 'w', newline='', encoding='utf-8') as csvfile:
        fieldnames = [
            'url', 'method', 'status', 'mimeType',
            'file_extension', 'size_kb', 'time_ms', 'initiator',
        ]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        for entry in entries:
            request = entry['request']
            response = entry['response']

            url = request.get('url', '')
            mimeType = response.get('content', {}).get('mimeType', '')
            status = response.get('status', '')
            method = request.get('method', '')

            # The HAR spec uses bodySize == -1 for "unknown"; clamp so we
            # never report a negative size.
            body_size = max(response.get('bodySize', 0), 0)
            size_kb = round(body_size / 1024, 2)
            time_ms = round(entry.get('time', 0), 2)

            # Prefer the extension from the URL path; otherwise guess from
            # the MIME type. Strip MIME parameters (e.g. "; charset=utf-8")
            # first, since guess_extension() only understands the bare type,
            # and drop the leading dot so both sources look alike ("html").
            path = urlparse(url).path
            bare_mime = mimeType.split(';')[0].strip()
            file_extension = (
                os.path.splitext(path)[1][1:]
                or (mimetypes.guess_extension(bare_mime) or '').lstrip('.')
            )

            # _initiator is a Chrome DevTools extension field, not standard
            # HAR, so it may be absent.
            initiator = entry.get('_initiator', {}).get('type', 'unknown')

            writer.writerow({
                'url': url,
                'method': method,
                'status': status,
                'mimeType': mimeType,
                'file_extension': file_extension,
                'size_kb': size_kb,
                'time_ms': time_ms,
                'initiator': initiator,
            })

    print(f"HAR parsed and saved to: {output_csv_path}")


# Usage
if __name__ == '__main__':
    har_file = 'your-file.har'  # Replace with path to your HAR file
    output_csv = 'har_output.csv'
    parse_har_to_csv(har_file, output_csv)