Major features:
- Complete modern TUI interface with three focused views
- Enhanced multi-column layout: Source | Proto | Destination | Extended | Frame Type | Metrics
- Simplified navigation with 1/2/3 hotkeys instead of F1/F2/F3
- Protocol hierarchy: Transport (TCP/UDP) → Extended (CH10/PTP) → Frame Types
- Classic TUI preserved with the --classic flag

Views implemented:
1. Flow Analysis view: enhanced multi-column flow overview with protocol detection
2. Packet Decoder view: three-panel deep inspection (Flows | Frames | Fields)
3. Statistical Analysis view: four analysis modes with timing and quality metrics

Technical improvements:
- Left-aligned text columns with IP:port precision
- Transport-protocol separation from extended protocols
- Frame-type identification (CH10-Data, TMATS, PTP Sync)
- Cross-view communication with persistent flow selection
- Context-sensitive help and status bars
- Comprehensive error handling with console fallback

480 lines · 22 KiB · Python
"""Right panel - Flow details with frame type table."""

import curses
from typing import Dict, List, Optional, Tuple

from ...models import FlowStats, FrameTypeStats
||
class DetailPanel:
    """Right-hand panel showing detailed information for the selected flow.

    Two tabs are offered:

    * ``Info``   - flow totals, enhanced-analysis summary, frame-type table,
      timing statistics and outlier-frame details.
    * ``Decode`` - a tree view of decoded frame information for flows that
      were handled by an enhanced decoder (currently Chapter 10).
    """

    def __init__(self):
        # Index into self.tabs: 0 = Info, 1 = Decode
        self.active_tab = 0
        self.tabs = ["Info", "Decode"]

    def draw(self, stdscr, x_offset: int, y_offset: int, width: int,
             flows_list: List["FlowStats"], selected_flow: int,
             max_height: Optional[int] = None):
        """Draw the detail panel for the selected flow or frame type.

        Args:
            stdscr: curses window to draw into.
            x_offset: leftmost column of the panel.
            y_offset: top row of the panel.
            width: available panel width in columns.
            flows_list: flows currently shown in the flow list.
            selected_flow: flat selection index covering each flow followed by
                its frame-type sub-rows (see _get_selected_flow_and_frame_type).
            max_height: optional panel height in rows; when None the physical
                screen height is used.
        """
        if not flows_list:
            stdscr.addstr(y_offset, x_offset, "No flows available")
            return

        # Resolve the flat selection index to a (flow, frame_type) pair.
        flow, selected_frame_type = self._get_selected_flow_and_frame_type(flows_list, selected_flow)
        if not flow:
            stdscr.addstr(y_offset, x_offset, "No flow selected")
            return

        # max_lines is an absolute row bound (exclusive): nothing is drawn at
        # or below it.  BUGFIX: the original computed `height - y_offset - 2`
        # here, which is a line *count*, inconsistent with the absolute bound
        # produced by the else-branch and with every `y_offset < max_lines`
        # comparison downstream; with a non-zero y_offset it under-used the
        # screen.
        if max_height is None:
            height, _ = stdscr.getmaxyx()
            max_lines = height - 2
        else:
            max_lines = y_offset + max_height

        try:
            # Tab bar occupies two rows (labels + separator line).  Only the
            # content start row moves down; the absolute bottom bound is
            # unchanged (the original also shrank max_lines by 2, reserving
            # the tab-bar space twice).
            self._draw_tab_bar(stdscr, x_offset, y_offset, width)
            y_offset += 2

            # Draw content based on active tab
            if self.active_tab == 0:
                self._draw_info_tab(stdscr, x_offset, y_offset, width, max_lines, flow, selected_frame_type)
            elif self.active_tab == 1:
                self._draw_decode_tab(stdscr, x_offset, y_offset, width, max_lines, flow, selected_frame_type)

        except curses.error:
            # Writing outside the physical screen raises curses.error; the
            # panel simply truncates, so this is safe to ignore.
            pass

    def _draw_tab_bar(self, stdscr, x_offset: int, y_offset: int, width: int):
        """Draw the tab labels (active tab bracketed) plus a separator line."""
        tab_line = ""
        for i, tab_name in enumerate(self.tabs):
            if i == self.active_tab:
                tab_line += f"[{tab_name}]"
            else:
                tab_line += f" {tab_name} "

            if i < len(self.tabs) - 1:
                tab_line += " "

        # Right-align the navigation hint.  On narrow panels the padding
        # collapses to nothing and the final slice keeps the line in bounds.
        tab_line += f" {' ' * (width - len(tab_line) - 15)}[Tab] to switch"

        stdscr.addstr(y_offset, x_offset, tab_line[:width-1], curses.A_BOLD)
        stdscr.addstr(y_offset + 1, x_offset, "─" * min(width-1, 50))

    def _draw_info_tab(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int,
                       flow: "FlowStats", selected_frame_type: Optional[str]):
        """Draw the Info tab: flow totals, enhanced analysis, frame-type table,
        timing statistics, outlier frames and - when a frame type is selected -
        frame-type specific details.  `max_lines` is an absolute row bound."""
        try:
            # ALWAYS show flow details first
            stdscr.addstr(y_offset, x_offset, f"FLOW DETAILS: {flow.src_ip} -> {flow.dst_ip}", curses.A_BOLD)
            y_offset += 2

            stdscr.addstr(y_offset, x_offset, f"Packets: {flow.frame_count} | Bytes: {flow.total_bytes:,}")
            y_offset += 1

            # Enhanced analysis information (only for non-standard decoders)
            if flow.enhanced_analysis.decoder_type != "Standard":
                y_offset += 1
                self._draw_enhanced_analysis(stdscr, x_offset, y_offset, width, flow)
                y_offset += self._get_enhanced_analysis_lines(flow)

            # Frame types table, ordered by descending packet count
            if flow.frame_types and y_offset < max_lines:
                y_offset += 1
                stdscr.addstr(y_offset, x_offset, "Frame Types:", curses.A_BOLD)
                y_offset += 1

                # Table header
                header = f"{'Type':<12} {'#Pkts':<6} {'Bytes':<8} {'Avg ΔT':<8} {'2σ Out':<6}"
                stdscr.addstr(y_offset, x_offset, header, curses.A_UNDERLINE)
                y_offset += 1

                sorted_frame_types = sorted(flow.frame_types.items(), key=lambda x: x[1].count, reverse=True)
                for frame_type, ft_stats in sorted_frame_types:
                    if y_offset >= max_lines:
                        break

                    avg_str = f"{ft_stats.avg_inter_arrival:.3f}s" if ft_stats.avg_inter_arrival > 0 else "N/A"
                    bytes_str = f"{ft_stats.total_bytes:,}" if ft_stats.total_bytes < 10000 else f"{ft_stats.total_bytes/1000:.1f}K"
                    outliers_count = len(ft_stats.outlier_details) if ft_stats.outlier_details else 0

                    # Truncate frame type name if too long for the column
                    type_name = frame_type[:11] if len(frame_type) > 11 else frame_type

                    ft_line = f"{type_name:<12} {ft_stats.count:<6} {bytes_str:<8} {avg_str:<8} {outliers_count:<6}"
                    stdscr.addstr(y_offset, x_offset, ft_line)
                    y_offset += 1

            # Flow-level timing statistics
            if y_offset < max_lines:
                y_offset += 1
                stdscr.addstr(y_offset, x_offset, "Timing:", curses.A_BOLD)
                y_offset += 1

                if flow.avg_inter_arrival > 0:
                    stdscr.addstr(y_offset, x_offset + 2, f"Avg: {flow.avg_inter_arrival:.6f}s")
                    y_offset += 1
                    if y_offset < max_lines:
                        stdscr.addstr(y_offset, x_offset + 2, f"Std: {flow.std_inter_arrival:.6f}s")
                        y_offset += 1
                else:
                    stdscr.addstr(y_offset, x_offset + 2, "No timing data")
                    y_offset += 1

            # Outlier frame details for each frame type that has any
            if flow.frame_types and y_offset < max_lines:
                outlier_frame_types = [(frame_type, ft_stats) for frame_type, ft_stats in flow.frame_types.items()
                                       if ft_stats.outlier_details]

                if outlier_frame_types:
                    y_offset += 1
                    if y_offset < max_lines:
                        stdscr.addstr(y_offset, x_offset, "Outlier Frames:", curses.A_BOLD)
                        y_offset += 1

                    for frame_type, ft_stats in outlier_frame_types:
                        if y_offset >= max_lines:
                            break

                        # Frame type header
                        if y_offset < max_lines:
                            stdscr.addstr(y_offset, x_offset + 2, f"{frame_type}:", curses.A_UNDERLINE)
                            y_offset += 1

                        # One row per outlier in "frame# | deltaT" format
                        for frame_num, frame_inter_arrival_time in ft_stats.outlier_details:
                            if y_offset >= max_lines:
                                break
                            outlier_line = f"{frame_num} | {frame_inter_arrival_time:.3f}s"
                            stdscr.addstr(y_offset, x_offset + 4, outlier_line)
                            y_offset += 1

            # If a frame type is selected, show frame-type specific details
            if selected_frame_type and selected_frame_type in flow.frame_types and y_offset < max_lines:
                ft_stats = flow.frame_types[selected_frame_type]

                # Separator between flow details and frame-type details
                y_offset += 2
                if y_offset < max_lines:
                    stdscr.addstr(y_offset, x_offset, "─" * min(width-2, 40))
                    y_offset += 1

                if y_offset < max_lines:
                    stdscr.addstr(y_offset, x_offset, f"FRAME TYPE: {selected_frame_type}", curses.A_BOLD)
                    y_offset += 2

                if y_offset < max_lines:
                    stdscr.addstr(y_offset, x_offset, f"Count: {ft_stats.count}")
                    y_offset += 1
                if y_offset < max_lines:
                    stdscr.addstr(y_offset, x_offset, f"Bytes: {ft_stats.total_bytes:,}")
                    y_offset += 1

                # Frame-type timing
                if y_offset < max_lines:
                    y_offset += 1
                    stdscr.addstr(y_offset, x_offset, "Timing:", curses.A_BOLD)
                    y_offset += 1

                    if ft_stats.avg_inter_arrival > 0:
                        if y_offset < max_lines:
                            stdscr.addstr(y_offset, x_offset + 2, f"Avg: {ft_stats.avg_inter_arrival:.6f}s")
                            y_offset += 1
                        if y_offset < max_lines:
                            stdscr.addstr(y_offset, x_offset + 2, f"Std: {ft_stats.std_inter_arrival:.6f}s")
                            y_offset += 1
                    else:
                        if y_offset < max_lines:
                            stdscr.addstr(y_offset, x_offset + 2, "No timing data")
                            y_offset += 1

        except curses.error:
            # Ignore curses errors from writing outside screen bounds
            pass

    def _get_selected_flow_and_frame_type(self, flows_list: List["FlowStats"],
                                          selected_flow: int) -> Tuple[Optional["FlowStats"], Optional[str]]:
        """Resolve the flat selection index to a (flow, frame_type) pair.

        The flow list is rendered as each flow followed by its frame types
        (sorted by descending count), so the flat index walks that order.
        Returns (flow, None) for a flow row, (flow, frame_type) for a
        frame-type row, and falls back to the first flow (or (None, None)
        for an empty list) when the index is out of bounds.
        """
        current_item = 0

        for flow in flows_list:
            if current_item == selected_flow:
                return flow, None  # Selected the main flow
            current_item += 1

            # Check frame types for this flow (same order as the list view)
            if flow.frame_types:
                sorted_frame_types = sorted(flow.frame_types.items(), key=lambda x: x[1].count, reverse=True)
                for frame_type, ft_stats in sorted_frame_types:
                    if current_item == selected_flow:
                        return flow, frame_type  # Selected a frame type
                    current_item += 1

        # Fallback to first flow if selection is out of bounds
        return flows_list[0] if flows_list else None, None

    def _draw_enhanced_analysis(self, stdscr, x_offset: int, y_offset: int, width: int, flow: "FlowStats"):
        """Draw the enhanced-analysis summary (timing quality, error counts,
        channel breakdown).  Must stay in sync with
        _get_enhanced_analysis_lines, which predicts the rows used here."""
        enhanced = flow.enhanced_analysis

        try:
            stdscr.addstr(y_offset, x_offset, f"Enhanced Analysis ({enhanced.decoder_type}):", curses.A_BOLD)
            y_offset += 1

            # Timing analysis (CH10 flows with embedded timestamps)
            if enhanced.has_internal_timing:
                stdscr.addstr(y_offset, x_offset + 2, f"Clock Drift: {enhanced.avg_clock_drift_ppm:.2f} PPM (max: {enhanced.max_clock_drift_ppm:.2f})")
                y_offset += 1

                stdscr.addstr(y_offset, x_offset + 2, f"Timing Quality: {enhanced.timing_quality.title()} | Stability: {enhanced.timing_stability.title()}")
                y_offset += 1

                if enhanced.anomaly_rate > 0:
                    stdscr.addstr(y_offset, x_offset + 2, f"Anomaly Rate: {enhanced.anomaly_rate:.1%} | Confidence: {enhanced.avg_confidence_score:.2f}")
                    y_offset += 1

            # Frame quality metrics
            if enhanced.avg_frame_quality > 0:
                stdscr.addstr(y_offset, x_offset + 2, f"Frame Quality: {enhanced.avg_frame_quality:.1f}%")
                y_offset += 1

            # Error counters, rendered on a single line when any are non-zero
            errors = []
            if enhanced.sequence_gaps > 0:
                errors.append(f"Seq: {enhanced.sequence_gaps}")
            if enhanced.rtc_sync_errors > 0:
                errors.append(f"RTC: {enhanced.rtc_sync_errors}")
            if enhanced.format_errors > 0:
                errors.append(f"Fmt: {enhanced.format_errors}")
            if enhanced.overflow_errors > 0:
                errors.append(f"Ovf: {enhanced.overflow_errors}")

            if errors:
                stdscr.addstr(y_offset, x_offset + 2, f"Errors: {' | '.join(errors)}")
                y_offset += 1

            # Channel breakdown
            if enhanced.channel_count > 0:
                channel_info = f"Channels: {enhanced.channel_count}"
                if enhanced.analog_channels > 0:
                    channel_info += f" (Analog: {enhanced.analog_channels})"
                if enhanced.pcm_channels > 0:
                    channel_info += f" (PCM: {enhanced.pcm_channels})"
                if enhanced.tmats_frames > 0:
                    channel_info += f" (TMATS: {enhanced.tmats_frames})"

                stdscr.addstr(y_offset, x_offset + 2, channel_info)
                y_offset += 1

            # Primary data type
            if enhanced.primary_data_type != "Unknown":
                stdscr.addstr(y_offset, x_offset + 2, f"Primary Type: {enhanced.primary_data_type}")
                y_offset += 1

        except curses.error:
            pass

    def _get_enhanced_analysis_lines(self, flow: "FlowStats") -> int:
        """Return the number of rows _draw_enhanced_analysis will occupy for
        this flow, so the caller can advance y_offset without drawing."""
        enhanced = flow.enhanced_analysis
        lines = 1  # Header line

        if enhanced.has_internal_timing:
            lines += 2  # Clock drift + timing quality
            if enhanced.anomaly_rate > 0:
                lines += 1  # Anomaly rate

        if enhanced.avg_frame_quality > 0:
            lines += 1  # Frame quality
        # One combined line if any error counter is non-zero
        if any([enhanced.sequence_gaps, enhanced.rtc_sync_errors,
                enhanced.format_errors, enhanced.overflow_errors]):
            lines += 1

        if enhanced.channel_count > 0:
            lines += 1  # Channel info

        if enhanced.primary_data_type != "Unknown":
            lines += 1  # Primary data type

        return lines

    def _draw_decode_tab(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int,
                         flow: "FlowStats", selected_frame_type: Optional[str]):
        """Draw the Decode tab: decoded frame information for flows handled by
        an enhanced decoder; standard flows get a placeholder message."""
        if flow.enhanced_analysis.decoder_type == "Standard":
            stdscr.addstr(y_offset, x_offset, "No enhanced decoder available for this flow", curses.A_DIM)
            return

        # Header for decode information
        stdscr.addstr(y_offset, x_offset, f"DECODED DATA ({flow.enhanced_analysis.decoder_type}):", curses.A_BOLD)
        y_offset += 2

        # Tree view of decoded information (only CH10 implemented so far)
        if flow.enhanced_analysis.decoder_type == "Chapter10_Enhanced":
            self._draw_ch10_decode_tree(stdscr, x_offset, y_offset, width, max_lines, flow)
        else:
            stdscr.addstr(y_offset, x_offset, f"Decoder type '{flow.enhanced_analysis.decoder_type}' display not implemented")

    def _draw_ch10_decode_tree(self, stdscr, x_offset: int, y_offset: int, width: int, max_lines: int, flow: "FlowStats"):
        """Render Chapter 10 decoded information as an indented tree.

        Each tree item is (label, value, indent_level): an empty label is a
        spacer row, a None value marks a bold category header, otherwise the
        row is a "label: value" pair.
        """
        enhanced = flow.enhanced_analysis

        tree_items = [
            ("📋 Header Information", None, 0),
            ("  Decoder Type", enhanced.decoder_type, 1),
            ("  Primary Data Type", enhanced.primary_data_type, 1),
            ("  Channel Count", str(enhanced.channel_count) if enhanced.channel_count > 0 else "Unknown", 1),
            ("", None, 0),  # Spacer
            ("⏱️ Timing Analysis", None, 0),
            ("  Has Internal Timing", "Yes" if enhanced.has_internal_timing else "No", 1),
            ("  Timing Quality", enhanced.timing_quality.title(), 1),
            ("  Clock Drift (avg)", f"{enhanced.avg_clock_drift_ppm:.2f} PPM" if enhanced.avg_clock_drift_ppm != 0 else "N/A", 1),
            ("  Clock Drift (max)", f"{enhanced.max_clock_drift_ppm:.2f} PPM" if enhanced.max_clock_drift_ppm != 0 else "N/A", 1),
            ("  Timing Stability", enhanced.timing_stability.title(), 1),
            ("  Anomaly Rate", f"{enhanced.anomaly_rate:.1%}" if enhanced.anomaly_rate > 0 else "0%", 1),
            ("  Confidence Score", f"{enhanced.avg_confidence_score:.2f}" if enhanced.avg_confidence_score > 0 else "N/A", 1),
            ("", None, 0),  # Spacer
            ("📊 Quality Metrics", None, 0),
            ("  Frame Quality (avg)", f"{enhanced.avg_frame_quality:.1f}%" if enhanced.avg_frame_quality > 0 else "N/A", 1),
            ("  Sequence Gaps", str(enhanced.sequence_gaps), 1),
            ("  RTC Sync Errors", str(enhanced.rtc_sync_errors), 1),
            ("  Format Errors", str(enhanced.format_errors), 1),
            ("  Overflow Errors", str(enhanced.overflow_errors), 1),
            ("", None, 0),  # Spacer
            ("📡 Channel Information", None, 0),
            ("  Total Channels", str(enhanced.channel_count), 1),
            ("  Analog Channels", str(enhanced.analog_channels), 1),
            ("  PCM Channels", str(enhanced.pcm_channels), 1),
            ("  TMATS Frames", str(enhanced.tmats_frames), 1),
        ]

        # Add decoded frame samples if available
        if enhanced.sample_decoded_fields:
            tree_items.extend([
                ("", None, 0),  # Spacer
                ("🔍 Decoded Frame Samples", None, 0),
            ])

            for frame_key, frame_data in enhanced.sample_decoded_fields.items():
                tree_items.append((f"  {frame_key.replace('_', ' ').title()}", None, 1))

                # Show important fields first
                priority_fields = ['packet_timestamp', 'internal_timestamp', 'data_type_name', 'channel_id',
                                   'sequence_number', 'frame_quality_score', 'rtc_sync_error']

                # Add priority fields
                for field_name in priority_fields:
                    if field_name in frame_data:
                        display_value = self._format_field_value(field_name, frame_data[field_name])
                        tree_items.append((f"    {field_name.replace('_', ' ').title()}", display_value, 2))

                # Add other fields (up to 5 more)
                other_fields = [k for k in frame_data.keys() if k not in priority_fields]
                for i, field_name in enumerate(other_fields[:5]):
                    display_value = self._format_field_value(field_name, frame_data[field_name])
                    tree_items.append((f"    {field_name.replace('_', ' ').title()}", display_value, 2))

                if len(other_fields) > 5:
                    tree_items.append((f"    ... and {len(other_fields) - 5} more fields", "", 2))

        # Add available fields summary
        if enhanced.available_field_names:
            tree_items.extend([
                ("", None, 0),  # Spacer
                ("📝 Available Decoder Fields", None, 0),
                ("  Total Fields Available", str(len(enhanced.available_field_names)), 1),
                ("  Field Categories", self._categorize_fields(enhanced.available_field_names), 1),
            ])

        # Render tree items up to the absolute row bound
        current_y = y_offset
        for label, value, indent_level in tree_items:
            if current_y >= max_lines:
                break

            if not label:  # Spacer line
                current_y += 1
                continue

            indent = "  " * indent_level

            if value is None:  # Category header
                line = f"{indent}{label}"
                stdscr.addstr(current_y, x_offset, line[:width-1], curses.A_BOLD)
            else:  # Key-value pair
                line = f"{indent}{label}: {value}"
                stdscr.addstr(current_y, x_offset, line[:width-1])

            current_y += 1

        # Add scrolling hint if content is cut off
        if current_y >= max_lines and len(tree_items) > (max_lines - y_offset):
            if max_lines > 0:
                stdscr.addstr(max_lines - 1, x_offset, "... (content truncated)", curses.A_DIM)

    def switch_tab(self):
        """Cycle to the next tab (wraps around)."""
        self.active_tab = (self.active_tab + 1) % len(self.tabs)

    def _format_field_value(self, field_name: str, value) -> str:
        """Format a decoded field value for display, keyed off the field name
        (timestamps in seconds, error flags as Yes/No, quality as percent)."""
        if value is None:
            return "N/A"

        # Special formatting for different field types
        if "timestamp" in field_name.lower():
            # BUGFIX: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures matter.
            try:
                return f"{float(value):.6f}s"
            except (TypeError, ValueError):
                return str(value)
        elif "error" in field_name.lower() or field_name.endswith("_error"):
            return "Yes" if value else "No"
        elif "quality" in field_name.lower() and isinstance(value, (int, float)):
            return f"{value:.1f}%"
        elif isinstance(value, float):
            return f"{value:.3f}"
        elif isinstance(value, bool):
            return "Yes" if value else "No"
        else:
            return str(value)

    def _categorize_fields(self, field_names: List[str]) -> str:
        """Group available decoder field names into coarse categories and
        return a compact "Category:count" summary string."""
        categories = {
            "Timing": 0,
            "Quality": 0,
            "Data": 0,
            "Header": 0,
            "Other": 0
        }

        for field_name in field_names:
            lower_name = field_name.lower()
            # First keyword match wins, so ordering below is significant
            if any(keyword in lower_name for keyword in ["time", "timestamp", "rtc", "drift"]):
                categories["Timing"] += 1
            elif any(keyword in lower_name for keyword in ["quality", "error", "sync", "confidence"]):
                categories["Quality"] += 1
            elif any(keyword in lower_name for keyword in ["data", "analog", "pcm", "channel", "sample"]):
                categories["Data"] += 1
            elif any(keyword in lower_name for keyword in ["header", "type", "id", "sequence", "length"]):
                categories["Header"] += 1
            else:
                categories["Other"] += 1

        # Format as compact string, omitting empty categories
        active_categories = [f"{k}:{v}" for k, v in categories.items() if v > 0]
        return ", ".join(active_categories)