Modern TUI with Enhanced Protocol Hierarchy Interface

Major Features:
- Complete modern TUI interface with three focused views
- Enhanced multi-column layout: Source | Proto | Destination | Extended | Frame Type | Metrics
- Simplified navigation with 1/2/3 hotkeys instead of F1/F2/F3
- Protocol hierarchy: Transport (TCP/UDP) → Extended (CH10/PTP) → Frame Types
- Classic TUI preserved with --classic flag (entry-point sketch below)
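
As a rough illustration of the simplified navigation and the preserved classic mode, here is a minimal entry-point sketch; the module layout, function names, and argparse wiring are assumptions for illustration, not the code added by this commit:

import argparse
import curses

def run_classic_tui(stdscr):
    # Placeholder for the preserved classic interface.
    stdscr.getch()

def run_modern_tui(stdscr):
    # 1/2/3 switch between the three focused views (replacing F1/F2/F3).
    views = {ord("1"): "Flow Analysis", ord("2"): "Packet Decoder", ord("3"): "Statistical Analysis"}
    active = "Flow Analysis"
    while True:
        stdscr.erase()
        stdscr.addstr(0, 0, f"Active view: {active}  (1/2/3 to switch, q to quit)")
        stdscr.refresh()
        key = stdscr.getch()
        if key in views:
            active = views[key]
        elif key in (ord("q"), ord("Q")):
            break

def main():
    parser = argparse.ArgumentParser(description="Protocol hierarchy TUI")
    parser.add_argument("--classic", action="store_true",
                        help="launch the preserved classic TUI instead of the modern views")
    args = parser.parse_args()
    curses.wrapper(run_classic_tui if args.classic else run_modern_tui)

if __name__ == "__main__":
    main()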

Views Implemented:
1. Flow Analysis View: Enhanced multi-column flow overview with protocol detection
2. Packet Decoder View: Three-panel deep inspection (Flows | Frames | Fields); layout sketch after this list
3. Statistical Analysis View: Four analysis modes with timing and quality metrics
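
To make the three-panel decoder layout concrete, here is a minimal sketch of one way to split the terminal into Flows | Frames | Fields columns; the proportions and the helper name are assumptions, not the commit's actual layout code:

import curses

def split_decoder_panels(stdscr):
    # Illustrative split of the terminal into Flows | Frames | Fields columns.
    height, width = stdscr.getmaxyx()
    flows_w = width // 4                      # flow list on the left
    frames_w = width // 3                     # frame list in the middle
    fields_w = width - flows_w - frames_w     # decoded fields take the rest
    flows_win = curses.newwin(height, flows_w, 0, 0)
    frames_win = curses.newwin(height, frames_w, 0, flows_w)
    fields_win = curses.newwin(height, fields_w, 0, flows_w + frames_w)
    return flows_win, frames_win, fields_win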

Technical Improvements:
- Left-aligned text columns with IP:port precision
- Transport protocol separation from extended protocols
- Frame type identification (CH10-Data, TMATS, PTP Sync)
- Cross-view communication with persistent flow selection
- Context-sensitive help and status bars
- Comprehensive error handling with console fallback (fallback sketch below)
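
The console fallback in the last item could look roughly like the following; this is a minimal sketch assuming a run_with_fallback helper and an iterable of FlowStats, not the commit's actual fallback path:

import curses

def run_with_fallback(tui_main, flows):
    # Try the curses TUI first; degrade to a plain console summary on failure.
    try:
        curses.wrapper(tui_main)
    except curses.error as exc:
        print(f"TUI unavailable ({exc}); printing console summary instead")
        for flow in flows:
            print(f"{flow.src_ip} -> {flow.dst_ip}: "
                  f"{flow.frame_count} packets, {flow.total_bytes:,} bytes")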

Date:   2025-07-26 22:46:49 -04:00
Parent: 0f2fc8f92c
Commit: 5c2cb1a4ed
22 changed files with 4207 additions and 35 deletions

@@ -2,14 +2,18 @@
Right panel - Flow details with frame type table
"""
from typing import List, Optional, Tuple
from typing import List, Optional, Tuple, Dict
import curses
from ...models import FlowStats, FrameTypeStats
class DetailPanel:
"""Right panel showing detailed flow information"""
"""Right panel showing detailed flow information with tabs"""
def __init__(self):
self.active_tab = 0 # 0 = Info, 1 = Decode
self.tabs = ["Info", "Decode"]
def draw(self, stdscr, x_offset: int, y_offset: int, width: int,
flows_list: List[FlowStats], selected_flow: int, max_height: Optional[int] = None):
@@ -31,14 +35,58 @@ class DetailPanel:
else:
max_lines = y_offset + max_height
try:
# Draw tab bar
tab_y = y_offset
self._draw_tab_bar(stdscr, x_offset, tab_y, width)
y_offset += 2 # Space for tab bar
max_lines -= 2
# Draw content based on active tab
if self.active_tab == 0:
self._draw_info_tab(stdscr, x_offset, y_offset, width, max_lines, flow, selected_frame_type)
elif self.active_tab == 1:
self._draw_decode_tab(stdscr, x_offset, y_offset, width, max_lines, flow, selected_frame_type)
except curses.error:
# Ignore curses errors from writing outside screen bounds
pass
def _draw_tab_bar(self, stdscr, x_offset: int, y_offset: int, width: int):
"""Draw the tab bar at the top of the panel"""
tab_line = ""
for i, tab_name in enumerate(self.tabs):
if i == self.active_tab:
tab_line += f"[{tab_name}]"
else:
tab_line += f" {tab_name} "
if i < len(self.tabs) - 1:
tab_line += " "
# Add tab navigation hint
tab_line += f" {' ' * (width - len(tab_line) - 15)}[Tab] to switch"
stdscr.addstr(y_offset, x_offset, tab_line[:width-1], curses.A_BOLD)
stdscr.addstr(y_offset + 1, x_offset, "─" * min(width-1, 50))  # separator under the tab bar
def _draw_info_tab(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int,
flow: FlowStats, selected_frame_type: Optional[str]):
"""Draw the Info tab content (original detail panel content)"""
try:
# ALWAYS show flow details first
stdscr.addstr(y_offset, x_offset, f"FLOW DETAILS: {flow.src_ip} -> {flow.dst_ip}", curses.A_BOLD)
y_offset += 2
stdscr.addstr(y_offset, x_offset, f"Packets: {flow.frame_count} | Bytes: {flow.total_bytes:,}")
y_offset += 1
# Enhanced analysis information
if flow.enhanced_analysis.decoder_type != "Standard":
y_offset += 1
self._draw_enhanced_analysis(stdscr, x_offset, y_offset, width, flow)
y_offset += self._get_enhanced_analysis_lines(flow)
# Frame types table
if flow.frame_types and y_offset < max_lines:
y_offset += 1
@@ -174,4 +222,259 @@ class DetailPanel:
current_item += 1
# Fallback to first flow if selection is out of bounds
return flows_list[0] if flows_list else None, None
def _draw_enhanced_analysis(self, stdscr, x_offset: int, y_offset: int, width: int, flow: FlowStats):
"""Draw enhanced analysis information"""
enhanced = flow.enhanced_analysis
try:
# Enhanced analysis header
stdscr.addstr(y_offset, x_offset, f"Enhanced Analysis ({enhanced.decoder_type}):", curses.A_BOLD)
y_offset += 1
# Timing analysis for CH10
if enhanced.has_internal_timing:
stdscr.addstr(y_offset, x_offset + 2, f"Clock Drift: {enhanced.avg_clock_drift_ppm:.2f} PPM (max: {enhanced.max_clock_drift_ppm:.2f})")
y_offset += 1
stdscr.addstr(y_offset, x_offset + 2, f"Timing Quality: {enhanced.timing_quality.title()} | Stability: {enhanced.timing_stability.title()}")
y_offset += 1
if enhanced.anomaly_rate > 0:
stdscr.addstr(y_offset, x_offset + 2, f"Anomaly Rate: {enhanced.anomaly_rate:.1%} | Confidence: {enhanced.avg_confidence_score:.2f}")
y_offset += 1
# Frame quality metrics
if enhanced.avg_frame_quality > 0:
stdscr.addstr(y_offset, x_offset + 2, f"Frame Quality: {enhanced.avg_frame_quality:.1f}%")
y_offset += 1
# Error counts
errors = []
if enhanced.sequence_gaps > 0:
errors.append(f"Seq: {enhanced.sequence_gaps}")
if enhanced.rtc_sync_errors > 0:
errors.append(f"RTC: {enhanced.rtc_sync_errors}")
if enhanced.format_errors > 0:
errors.append(f"Fmt: {enhanced.format_errors}")
if enhanced.overflow_errors > 0:
errors.append(f"Ovf: {enhanced.overflow_errors}")
if errors:
stdscr.addstr(y_offset, x_offset + 2, f"Errors: {' | '.join(errors)}")
y_offset += 1
# Data analysis
if enhanced.channel_count > 0:
channel_info = f"Channels: {enhanced.channel_count}"
if enhanced.analog_channels > 0:
channel_info += f" (Analog: {enhanced.analog_channels})"
if enhanced.pcm_channels > 0:
channel_info += f" (PCM: {enhanced.pcm_channels})"
if enhanced.tmats_frames > 0:
channel_info += f" (TMATS: {enhanced.tmats_frames})"
stdscr.addstr(y_offset, x_offset + 2, channel_info)
y_offset += 1
# Primary data type
if enhanced.primary_data_type != "Unknown":
stdscr.addstr(y_offset, x_offset + 2, f"Primary Type: {enhanced.primary_data_type}")
y_offset += 1
except curses.error:
pass
def _get_enhanced_analysis_lines(self, flow: FlowStats) -> int:
"""Calculate how many lines the enhanced analysis will take"""
enhanced = flow.enhanced_analysis
lines = 1 # Header line
if enhanced.has_internal_timing:
lines += 2 # Clock drift + timing quality
if enhanced.anomaly_rate > 0:
lines += 1 # Anomaly rate
if enhanced.avg_frame_quality > 0:
lines += 1 # Frame quality
# Check if we have errors to display
if any([enhanced.sequence_gaps, enhanced.rtc_sync_errors,
enhanced.format_errors, enhanced.overflow_errors]):
lines += 1
if enhanced.channel_count > 0:
lines += 1 # Channel info
if enhanced.primary_data_type != "Unknown":
lines += 1 # Primary data type
return lines
def _draw_decode_tab(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int,
flow: FlowStats, selected_frame_type: Optional[str]):
"""Draw the Decode tab content showing decoded frame information"""
if flow.enhanced_analysis.decoder_type == "Standard":
stdscr.addstr(y_offset, x_offset, "No enhanced decoder available for this flow", curses.A_DIM)
return
# Header for decode information
stdscr.addstr(y_offset, x_offset, f"DECODED DATA ({flow.enhanced_analysis.decoder_type}):", curses.A_BOLD)
y_offset += 2
# Tree view of decoded information
if flow.enhanced_analysis.decoder_type == "Chapter10_Enhanced":
self._draw_ch10_decode_tree(stdscr, x_offset, y_offset, width, max_lines, flow)
else:
stdscr.addstr(y_offset, x_offset, f"Decoder type '{flow.enhanced_analysis.decoder_type}' display not implemented")
def _draw_ch10_decode_tree(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int, flow: FlowStats):
"""Draw Chapter 10 decoded information in tree format"""
enhanced = flow.enhanced_analysis
tree_items = [
("📋 Header Information", None, 0),
(" Decoder Type", enhanced.decoder_type, 1),
(" Primary Data Type", enhanced.primary_data_type, 1),
(" Channel Count", str(enhanced.channel_count) if enhanced.channel_count > 0 else "Unknown", 1),
("", None, 0), # Spacer
("⏱️ Timing Analysis", None, 0),
(" Has Internal Timing", "Yes" if enhanced.has_internal_timing else "No", 1),
(" Timing Quality", enhanced.timing_quality.title(), 1),
(" Clock Drift (avg)", f"{enhanced.avg_clock_drift_ppm:.2f} PPM" if enhanced.avg_clock_drift_ppm != 0 else "N/A", 1),
(" Clock Drift (max)", f"{enhanced.max_clock_drift_ppm:.2f} PPM" if enhanced.max_clock_drift_ppm != 0 else "N/A", 1),
(" Timing Stability", enhanced.timing_stability.title(), 1),
(" Anomaly Rate", f"{enhanced.anomaly_rate:.1%}" if enhanced.anomaly_rate > 0 else "0%", 1),
(" Confidence Score", f"{enhanced.avg_confidence_score:.2f}" if enhanced.avg_confidence_score > 0 else "N/A", 1),
("", None, 0), # Spacer
("📊 Quality Metrics", None, 0),
(" Frame Quality (avg)", f"{enhanced.avg_frame_quality:.1f}%" if enhanced.avg_frame_quality > 0 else "N/A", 1),
(" Sequence Gaps", str(enhanced.sequence_gaps), 1),
(" RTC Sync Errors", str(enhanced.rtc_sync_errors), 1),
(" Format Errors", str(enhanced.format_errors), 1),
(" Overflow Errors", str(enhanced.overflow_errors), 1),
("", None, 0), # Spacer
("📡 Channel Information", None, 0),
(" Total Channels", str(enhanced.channel_count), 1),
(" Analog Channels", str(enhanced.analog_channels), 1),
(" PCM Channels", str(enhanced.pcm_channels), 1),
(" TMATS Frames", str(enhanced.tmats_frames), 1),
]
# Add decoded frame samples if available
if enhanced.sample_decoded_fields:
tree_items.extend([
("", None, 0), # Spacer
("🔍 Decoded Frame Samples", None, 0),
])
for frame_key, frame_data in enhanced.sample_decoded_fields.items():
tree_items.append((f" {frame_key.replace('_', ' ').title()}", None, 1))
# Show important fields first
priority_fields = ['packet_timestamp', 'internal_timestamp', 'data_type_name', 'channel_id',
'sequence_number', 'frame_quality_score', 'rtc_sync_error']
# Add priority fields
for field_name in priority_fields:
if field_name in frame_data:
display_value = self._format_field_value(field_name, frame_data[field_name])
tree_items.append((f" {field_name.replace('_', ' ').title()}", display_value, 2))
# Add other fields (up to 5 more)
other_fields = [k for k in frame_data.keys() if k not in priority_fields]
for i, field_name in enumerate(other_fields[:5]):
display_value = self._format_field_value(field_name, frame_data[field_name])
tree_items.append((f" {field_name.replace('_', ' ').title()}", display_value, 2))
if len(other_fields) > 5:
tree_items.append((f" ... and {len(other_fields) - 5} more fields", "", 2))
# Add available fields summary
if enhanced.available_field_names:
tree_items.extend([
("", None, 0), # Spacer
("📝 Available Decoder Fields", None, 0),
(" Total Fields Available", str(len(enhanced.available_field_names)), 1),
(" Field Categories", self._categorize_fields(enhanced.available_field_names), 1),
])
# Render tree items
current_y = y_offset
for label, value, indent_level in tree_items:
if current_y >= max_lines:
break
if not label: # Spacer line
current_y += 1
continue
indent = " " * indent_level
if value is None: # Category header
line = f"{indent}{label}"
stdscr.addstr(current_y, x_offset, line[:width-1], curses.A_BOLD)
else: # Key-value pair
line = f"{indent}{label}: {value}"
stdscr.addstr(current_y, x_offset, line[:width-1])
current_y += 1
# Add scrolling hint if content is cut off
if current_y >= max_lines and len(tree_items) > (max_lines - y_offset):
if max_lines > 0:
stdscr.addstr(max_lines - 1, x_offset, "... (content truncated)", curses.A_DIM)
def switch_tab(self):
"""Switch to the next tab"""
self.active_tab = (self.active_tab + 1) % len(self.tabs)
def _format_field_value(self, field_name: str, value) -> str:
"""Format field value for display"""
if value is None:
return "N/A"
# Special formatting for different field types
if "timestamp" in field_name.lower():
try:
return f"{float(value):.6f}s"
except (ValueError, TypeError):
return str(value)
elif "error" in field_name.lower() or field_name.endswith("_error"):
return "Yes" if value else "No"
elif "quality" in field_name.lower() and isinstance(value, (int, float)):
return f"{value:.1f}%"
elif isinstance(value, float):
return f"{value:.3f}"
elif isinstance(value, bool):
return "Yes" if value else "No"
else:
return str(value)
def _categorize_fields(self, field_names: List[str]) -> str:
"""Categorize available fields into groups"""
categories = {
"Timing": 0,
"Quality": 0,
"Data": 0,
"Header": 0,
"Other": 0
}
for field_name in field_names:
lower_name = field_name.lower()
if any(keyword in lower_name for keyword in ["time", "timestamp", "rtc", "drift"]):
categories["Timing"] += 1
elif any(keyword in lower_name for keyword in ["quality", "error", "sync", "confidence"]):
categories["Quality"] += 1
elif any(keyword in lower_name for keyword in ["data", "analog", "pcm", "channel", "sample"]):
categories["Data"] += 1
elif any(keyword in lower_name for keyword in ["header", "type", "id", "sequence", "length"]):
categories["Header"] += 1
else:
categories["Other"] += 1
# Format as compact string
active_categories = [f"{k}:{v}" for k, v in categories.items() if v > 0]
return ", ".join(active_categories)