Modern TUI with Enhanced Protocol Hierarchy Interface

Major Features:
- Complete modern TUI interface with three focused views
- Enhanced multi-column layout: Source | Proto | Destination | Extended | Frame Type | Metrics
- Simplified navigation with 1/2/3 hotkeys instead of F1/F2/F3
- Protocol hierarchy: Transport (TCP/UDP) → Extended (CH10/PTP) → Frame Types
- Classic TUI preserved with --classic flag

Views Implemented:
1. Flow Analysis View: Enhanced multi-column flow overview with protocol detection
2. Packet Decoder View: Three-panel deep inspection (Flows | Frames | Fields)
3. Statistical Analysis View: Four analysis modes with timing and quality metrics

Technical Improvements:
- Left-aligned text columns with IP:port precision
- Transport protocol separation from extended protocols
- Frame type identification (CH10-Data, TMATS, PTP Sync)
- Cross-view communication with persistent flow selection
- Context-sensitive help and status bars
- Comprehensive error handling with console fallback
This commit is contained in:
@@ -5,6 +5,8 @@ Flow tracking and management
|
||||
from typing import Dict, Set, Tuple
|
||||
from ..models import FlowStats, FrameTypeStats
|
||||
from ..protocols import Chapter10Dissector, PTPDissector, IENADissector, StandardProtocolDissectors
|
||||
from ..protocols.enhanced_chapter10 import EnhancedChapter10Decoder
|
||||
from ..plugins.ch10_timing_analysis import Chapter10TimingAnalysisPlugin
|
||||
|
||||
try:
|
||||
from scapy.all import Packet, IP, UDP, TCP
|
||||
@@ -28,6 +30,10 @@ class FlowManager:
|
||||
'iena': IENADissector()
|
||||
}
|
||||
self.standard_dissectors = StandardProtocolDissectors()
|
||||
|
||||
# Enhanced protocol decoders
|
||||
self.enhanced_ch10_decoder = EnhancedChapter10Decoder()
|
||||
self.ch10_timing_plugin = Chapter10TimingAnalysisPlugin()
|
||||
|
||||
def process_packet(self, packet: Packet, frame_num: int) -> None:
|
||||
"""Process a single packet and update flow statistics"""
|
||||
@@ -93,6 +99,9 @@ class FlowManager:
|
||||
frame_type = self._classify_frame_type(packet, dissection_results)
|
||||
self._update_frame_type_stats(flow, frame_type, frame_num, timestamp, packet_size)
|
||||
|
||||
# Enhanced analysis for Chapter 10 flows
|
||||
self._perform_enhanced_analysis(packet, flow, frame_num, transport_info)
|
||||
|
||||
# Calculate inter-arrival time
|
||||
if len(flow.timestamps) > 1:
|
||||
inter_arrival = timestamp - flow.timestamps[-2]
|
||||
@@ -354,6 +363,122 @@ class FlowManager:
|
||||
|
||||
return transport_info
|
||||
|
||||
def _perform_enhanced_analysis(self, packet: Packet, flow: FlowStats, frame_num: int, transport_info: Dict):
|
||||
"""Perform enhanced analysis using specialized decoders"""
|
||||
|
||||
# Check if this packet can be analyzed by enhanced CH10 decoder
|
||||
# Use basic dissector detection as a trigger for enhanced analysis
|
||||
if "CHAPTER10" in flow.detected_protocol_types or "CH10" in flow.detected_protocol_types:
|
||||
confidence = 1.0 # High confidence since basic dissector already detected it
|
||||
else:
|
||||
confidence = self.enhanced_ch10_decoder.can_decode(packet, transport_info)
|
||||
|
||||
if confidence > 0.5:
|
||||
# Decode frame with full field extraction
|
||||
frame_data = self.enhanced_ch10_decoder.decode_frame(packet, transport_info)
|
||||
|
||||
if frame_data:
|
||||
# Update flow with enhanced decoder type
|
||||
if flow.enhanced_analysis.decoder_type == "Standard":
|
||||
flow.enhanced_analysis.decoder_type = "Chapter10_Enhanced"
|
||||
flow.detected_protocol_types.add("Chapter10")
|
||||
|
||||
# Run timing analysis plugin
|
||||
flow_duration = (flow.timestamps[-1] - flow.timestamps[0]) if len(flow.timestamps) > 1 else 1.0
|
||||
flow_context = {
|
||||
'flow_duration': flow_duration,
|
||||
'flow_key': f"{flow.src_ip}->{flow.dst_ip}"
|
||||
}
|
||||
|
||||
timing_result = self.ch10_timing_plugin.analyze_frame(frame_data, flow_context)
|
||||
|
||||
# Update enhanced analysis data
|
||||
self._update_enhanced_analysis_data(flow, frame_data, timing_result)
|
||||
|
||||
def _update_enhanced_analysis_data(self, flow: FlowStats, frame_data, timing_result):
|
||||
"""Update the enhanced analysis data structure"""
|
||||
|
||||
enhanced = flow.enhanced_analysis
|
||||
|
||||
# Store sample decoded field data for display (keep first few samples)
|
||||
if len(enhanced.sample_decoded_fields) < 5: # Store up to 5 sample frames
|
||||
frame_sample = {}
|
||||
# Get all available fields from this frame
|
||||
for field_name in self.enhanced_ch10_decoder.supported_fields:
|
||||
field_value = frame_data.get_field(field_name.name)
|
||||
if field_value is not None:
|
||||
frame_sample[field_name.name] = field_value
|
||||
|
||||
if frame_sample: # Only store if we got some data
|
||||
enhanced.sample_decoded_fields[f"frame_{len(enhanced.sample_decoded_fields)}"] = frame_sample
|
||||
|
||||
# Update available field names list
|
||||
if not enhanced.available_field_names:
|
||||
enhanced.available_field_names = [field.name for field in self.enhanced_ch10_decoder.supported_fields]
|
||||
|
||||
# Update timing analysis
|
||||
if timing_result.internal_timestamp is not None:
|
||||
enhanced.has_internal_timing = True
|
||||
|
||||
# Update running averages for timing
|
||||
if timing_result.clock_drift_ppm is not None:
|
||||
if enhanced.avg_clock_drift_ppm == 0:
|
||||
enhanced.avg_clock_drift_ppm = timing_result.clock_drift_ppm
|
||||
else:
|
||||
# Simple running average
|
||||
enhanced.avg_clock_drift_ppm = (enhanced.avg_clock_drift_ppm + timing_result.clock_drift_ppm) / 2
|
||||
|
||||
enhanced.max_clock_drift_ppm = max(enhanced.max_clock_drift_ppm, abs(timing_result.clock_drift_ppm))
|
||||
|
||||
# Update timing quality (use most recent)
|
||||
enhanced.timing_quality = timing_result.timing_quality
|
||||
|
||||
# Update anomaly rate
|
||||
if timing_result.anomaly_detected:
|
||||
enhanced.anomaly_rate = (enhanced.anomaly_rate * (flow.frame_count - 1) + 1) / flow.frame_count
|
||||
else:
|
||||
enhanced.anomaly_rate = (enhanced.anomaly_rate * (flow.frame_count - 1)) / flow.frame_count
|
||||
|
||||
# Update confidence score
|
||||
if enhanced.avg_confidence_score == 0:
|
||||
enhanced.avg_confidence_score = timing_result.confidence_score
|
||||
else:
|
||||
enhanced.avg_confidence_score = (enhanced.avg_confidence_score + timing_result.confidence_score) / 2
|
||||
|
||||
# Update frame quality
|
||||
frame_quality = frame_data.get_field('frame_quality_score', 0)
|
||||
if frame_quality > 0:
|
||||
if enhanced.avg_frame_quality == 0:
|
||||
enhanced.avg_frame_quality = frame_quality
|
||||
else:
|
||||
enhanced.avg_frame_quality = (enhanced.avg_frame_quality + frame_quality) / 2
|
||||
|
||||
# Update error counts
|
||||
if frame_data.get_field('rtc_sync_error', False):
|
||||
enhanced.rtc_sync_errors += 1
|
||||
if frame_data.get_field('format_error', False):
|
||||
enhanced.format_errors += 1
|
||||
if frame_data.get_field('overflow_error', False):
|
||||
enhanced.overflow_errors += 1
|
||||
|
||||
# Update channel information
|
||||
channel_id = frame_data.get_field('channel_id', 0)
|
||||
if channel_id > 0:
|
||||
enhanced.channel_count = max(enhanced.channel_count, channel_id)
|
||||
|
||||
# Update data type counters
|
||||
if frame_data.get_field('is_analog_data', False):
|
||||
enhanced.analog_channels = max(enhanced.analog_channels, 1)
|
||||
if frame_data.get_field('is_pcm_data', False):
|
||||
enhanced.pcm_channels = max(enhanced.pcm_channels, 1)
|
||||
if frame_data.get_field('is_tmats_data', False):
|
||||
enhanced.tmats_frames += 1
|
||||
|
||||
# Set primary data type
|
||||
data_type_name = frame_data.get_field('data_type_name', 'Unknown')
|
||||
if enhanced.primary_data_type == "Unknown":
|
||||
enhanced.primary_data_type = data_type_name
|
||||
|
||||
def _classify_traffic(self, dst_ip: str) -> str:
|
||||
"""Classify traffic as Unicast, Multicast, or Broadcast based on destination IP"""
|
||||
try:
|
||||
|
||||
Reference in New Issue
Block a user