"""
|
|
Chapter 10 Timing Analysis Plugin
|
|
Demonstrates how to use exposed CH10 frame variables for advanced analysis
|
|
"""
|
|
|
|
from typing import Dict, Any, List, Optional
|
|
from dataclasses import dataclass
|
|
import statistics
|
|
|
|
@dataclass
class TimingAnalysisResult:
    """Results from timing analysis"""
    packet_timestamp: float
    internal_timestamp: Optional[float]
    time_delta: Optional[float]
    clock_drift_ppm: Optional[float]
    timing_quality: str
    anomaly_detected: bool
    confidence_score: float


class Chapter10TimingAnalysisPlugin:
    """Analysis plugin demonstrating use of exposed CH10 variables"""

    def __init__(self):
        self.flow_timing_history: Dict[str, List[TimingAnalysisResult]] = {}
        self.reference_time_base: Optional[float] = None

    @property
    def plugin_name(self) -> str:
        return "CH10_Advanced_Timing_Analysis"

    @property
    def required_fields(self) -> List[str]:
        """Fields required from the enhanced CH10 decoder"""
        return [
            'packet_timestamp',        # From packet capture
            'internal_timestamp',      # From CH10 internal time
            'internal_seconds',        # CH10 seconds component
            'internal_nanoseconds',    # CH10 nanoseconds component
            'channel_id',              # Channel identifier
            'sequence_number',         # Packet sequence
            'relative_time_counter',   # RTC value
            'data_type',               # Data type for context
            'rtc_sync_error',          # RTC sync status
            'time_source_confidence',  # Decoder's confidence assessment
            'frame_quality_score'      # Overall frame quality
        ]

    def analyze_frame(self, frame_data, flow_context: Dict[str, Any]) -> TimingAnalysisResult:
        """Analyze timing for a single CH10 frame"""

        # Extract timing information
        packet_time = frame_data.get_field('packet_timestamp')
        internal_time = frame_data.get_field('internal_timestamp')

        if internal_time is None:
            return TimingAnalysisResult(
                packet_timestamp=packet_time,
                internal_timestamp=None,
                time_delta=None,
                clock_drift_ppm=None,
                timing_quality="no_internal_time",
                anomaly_detected=False,
                confidence_score=0.0
            )

        # Calculate basic time delta
        time_delta = packet_time - internal_time

        # Analyze timing quality
        timing_quality = self._assess_timing_quality(frame_data, time_delta)

        # Calculate clock drift in PPM
        clock_drift_ppm = self._calculate_clock_drift_ppm(
            time_delta,
            flow_context.get('flow_duration', 1.0)
        )

        # Detect anomalies
        anomaly_detected = self._detect_timing_anomaly(frame_data, time_delta)

        # Calculate confidence score
        confidence_score = self._calculate_confidence_score(frame_data, time_delta)

        result = TimingAnalysisResult(
            packet_timestamp=packet_time,
            internal_timestamp=internal_time,
            time_delta=time_delta,
            clock_drift_ppm=clock_drift_ppm,
            timing_quality=timing_quality,
            anomaly_detected=anomaly_detected,
            confidence_score=confidence_score
        )

        # Store in flow history for trend analysis
        flow_key = flow_context.get('flow_key', 'unknown')
        self.flow_timing_history.setdefault(flow_key, []).append(result)

        return result

    def analyze_flow_timing_trends(self, flow_key: str) -> Dict[str, Any]:
        """Analyze timing trends across an entire flow"""

        if flow_key not in self.flow_timing_history:
            return {'error': 'No timing data available for flow'}

        timing_results = self.flow_timing_history[flow_key]
        valid_results = [r for r in timing_results if r.time_delta is not None]

        if not valid_results:
            return {'error': 'No valid timing data in flow'}

        # Extract time deltas for statistical analysis
        time_deltas = [r.time_delta for r in valid_results]
        drift_values = [r.clock_drift_ppm for r in valid_results if r.clock_drift_ppm is not None]

        # Calculate comprehensive statistics
        analysis = {
            'frame_count': len(valid_results),
            'timing_statistics': {
                'mean_delta': statistics.mean(time_deltas),
                'median_delta': statistics.median(time_deltas),
                'std_deviation': statistics.stdev(time_deltas) if len(time_deltas) > 1 else 0.0,
                'min_delta': min(time_deltas),
                'max_delta': max(time_deltas),
                'delta_range': max(time_deltas) - min(time_deltas)
            },
            'drift_analysis': {},
            'quality_metrics': {
                'anomaly_rate': sum(1 for r in valid_results if r.anomaly_detected) / len(valid_results),
                'avg_confidence': statistics.mean([r.confidence_score for r in valid_results]),
                'quality_trend': self._analyze_quality_trend(valid_results)
            },
            'timing_stability': self._assess_timing_stability(time_deltas),
            'recommendations': []
        }

        # Drift analysis
        if drift_values:
            analysis['drift_analysis'] = {
                'mean_drift_ppm': statistics.mean(drift_values),
                'drift_stability': statistics.stdev(drift_values) if len(drift_values) > 1 else 0.0,
                'drift_trend': self._calculate_drift_trend(drift_values),
                'max_drift_ppm': max(abs(d) for d in drift_values)
            }

        # Generate recommendations
        analysis['recommendations'] = self._generate_recommendations(analysis)

        return analysis

    def compare_channel_timing(self, flow_data: Dict[str, List]) -> Dict[str, Any]:
        """Compare timing across multiple CH10 channels"""

        # Note: this simplified example works from the accumulated
        # flow_timing_history rather than the flow_data argument.
        channel_analysis = {}

        for flow_key, timing_results in self.flow_timing_history.items():
            if timing_results:
                # A full implementation would extract channel_id from the
                # frame data; here each flow stands in for a channel.
                channel_analysis[flow_key] = {
                    'timing_variance': self._calculate_timing_variance(timing_results),
                    'sync_quality': self._assess_sync_quality(timing_results),
                    'relative_drift': self._calculate_relative_drift(timing_results)
                }

        # Cross-channel correlation analysis
        correlation_matrix = self._calculate_channel_correlations(channel_analysis)

        return {
            'channel_individual_analysis': channel_analysis,
            'cross_channel_correlations': correlation_matrix,
            'sync_status': self._assess_overall_sync_status(channel_analysis),
            'timing_reference_quality': self._assess_reference_quality()
        }

    def _assess_timing_quality(self, frame_data, time_delta: float) -> str:
        """Assess the quality of timing data"""

        # Check for RTC sync errors
        if frame_data.get_field('rtc_sync_error', False):
            return "rtc_sync_error"

        # Check frame quality
        frame_quality = frame_data.get_field('frame_quality_score', 0)
        if frame_quality < 50:
            return "poor_frame_quality"

        # Classify by time delta magnitude
        abs_delta = abs(time_delta)
        if abs_delta > 1.0:      # More than 1 second difference
            return "large_time_discrepancy"
        elif abs_delta > 0.1:    # More than 100 ms difference
            return "moderate_time_discrepancy"
        elif abs_delta > 0.001:  # More than 1 ms difference
            return "small_time_discrepancy"
        else:
            return "excellent"

    def _calculate_clock_drift_ppm(self, time_delta: float, flow_duration: float) -> float:
        """Calculate clock drift in parts per million"""
        if flow_duration <= 0:
            return 0.0

        # Simplified drift calculation; in practice this would need
        # more sophisticated analysis across many samples
        drift_fraction = time_delta / flow_duration
        return drift_fraction * 1_000_000  # Convert to PPM

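    # Worked example (illustrative numbers, not from a real capture): an
    # internal clock that lags the capture clock by 0.5 ms over a 60 s flow
    # yields (0.0005 / 60) * 1_000_000 ≈ 8.3 PPM of apparent drift.
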
    def _detect_timing_anomaly(self, frame_data, time_delta: float) -> bool:
        """Detect timing anomalies in this frame"""

        # Multiple criteria for anomaly detection
        anomaly_indicators = [
            abs(time_delta) > 0.1,                                  # Large time discrepancy
            frame_data.get_field('rtc_sync_error', False),          # RTC sync error
            frame_data.get_field('format_error', False),            # Format error
            frame_data.get_field('overflow_error', False),          # Overflow error
            frame_data.get_field('frame_quality_score', 100) < 70   # Poor frame quality
        ]

        return any(anomaly_indicators)

    def _calculate_confidence_score(self, frame_data, time_delta: float) -> float:
        """Calculate confidence in timing analysis"""

        confidence_factors = []

        # Frame quality factor
        frame_quality = frame_data.get_field('frame_quality_score', 0) / 100.0
        confidence_factors.append(frame_quality)

        # Time source confidence from decoder
        time_confidence = frame_data.get_field('time_source_confidence', 0.5)
        confidence_factors.append(time_confidence)

        # Time delta reasonableness
        abs_delta = abs(time_delta)
        delta_confidence = max(0.0, 1.0 - (abs_delta / 10.0))  # Decreases with larger deltas
        confidence_factors.append(delta_confidence)

        # Error flags (reduce confidence)
        error_penalty = 0.0
        if frame_data.get_field('rtc_sync_error', False):
            error_penalty += 0.3
        if frame_data.get_field('format_error', False):
            error_penalty += 0.2
        if frame_data.get_field('overflow_error', False):
            error_penalty += 0.2

        base_confidence = statistics.mean(confidence_factors)
        final_confidence = max(0.0, base_confidence - error_penalty)

        return final_confidence

    def _analyze_quality_trend(self, timing_results: List[TimingAnalysisResult]) -> str:
        """Analyze trend in timing quality over time"""

        if len(timing_results) < 10:
            return "insufficient_data"

        # Split into segments and compare the first third against the last
        segment_size = len(timing_results) // 3
        early_segment = timing_results[:segment_size]
        late_segment = timing_results[-segment_size:]

        early_confidence = statistics.mean([r.confidence_score for r in early_segment])
        late_confidence = statistics.mean([r.confidence_score for r in late_segment])

        if late_confidence > early_confidence + 0.1:
            return "improving"
        elif late_confidence < early_confidence - 0.1:
            return "degrading"
        else:
            return "stable"

    def _assess_timing_stability(self, time_deltas: List[float]) -> Dict[str, Any]:
        """Assess the stability of timing measurements"""

        if len(time_deltas) < 2:
            return {'stability': 'insufficient_data'}

        std_dev = statistics.stdev(time_deltas)
        mean_abs_dev = statistics.mean([abs(d) for d in time_deltas])

        # Coefficient of variation
        mean_delta = statistics.mean(time_deltas)
        cv = std_dev / abs(mean_delta) if mean_delta != 0 else float('inf')

        # Stability classification
        if std_dev < 0.001:    # Less than 1 ms standard deviation
            stability = "excellent"
        elif std_dev < 0.01:   # Less than 10 ms standard deviation
            stability = "good"
        elif std_dev < 0.1:    # Less than 100 ms standard deviation
            stability = "moderate"
        else:
            stability = "poor"

        return {
            'stability': stability,
            'standard_deviation': std_dev,
            'mean_absolute_deviation': mean_abs_dev,
            'coefficient_of_variation': cv,
            'drift_consistency': 'stable' if cv < 0.1 else 'variable'
        }

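    # Illustrative check of the thresholds above: deltas of
    # [0.0002, 0.0004, 0.0003] s have a standard deviation of 0.0001 s
    # (under 1 ms), so they classify as "excellent".
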
    def _calculate_drift_trend(self, drift_values: List[float]) -> str:
        """Calculate the trend in clock drift"""

        if len(drift_values) < 5:
            return "insufficient_data"

        # Simple linear trend analysis
        x = list(range(len(drift_values)))
        y = drift_values

        # Least-squares slope as the trend indicator
        n = len(x)
        sum_x = sum(x)
        sum_y = sum(y)
        sum_xy = sum(x[i] * y[i] for i in range(n))
        sum_x2 = sum(x[i] * x[i] for i in range(n))

        denominator = (n * sum_x2 - sum_x * sum_x)
        if denominator == 0:
            return "stable"

        slope = (n * sum_xy - sum_x * sum_y) / denominator

        if slope > 1.0:      # Drift growing by more than 1 PPM per frame
            return "increasing"
        elif slope < -1.0:   # Drift shrinking by more than 1 PPM per frame
            return "decreasing"
        else:
            return "stable"

    def _generate_recommendations(self, analysis: Dict[str, Any]) -> List[str]:
        """Generate recommendations based on timing analysis"""

        recommendations = []

        # Check anomaly rate
        anomaly_rate = analysis.get('quality_metrics', {}).get('anomaly_rate', 0)
        if anomaly_rate > 0.1:  # More than 10% anomalies
            recommendations.append("High anomaly rate detected - check timing source stability")

        # Check drift
        drift_analysis = analysis.get('drift_analysis', {})
        max_drift = drift_analysis.get('max_drift_ppm', 0)
        if max_drift > 100:  # More than 100 PPM drift
            recommendations.append("Significant clock drift detected - calibrate timing reference")

        # Check timing stability
        stability_info = analysis.get('timing_stability', {})
        if stability_info.get('stability') in ['poor', 'moderate']:
            recommendations.append("Timing instability detected - investigate noise sources")

        # Check confidence
        avg_confidence = analysis.get('quality_metrics', {}).get('avg_confidence', 1.0)
        if avg_confidence < 0.7:
            recommendations.append("Low timing confidence - verify CH10 frame integrity")

        if not recommendations:
            recommendations.append("Timing analysis shows good quality - no issues detected")

        return recommendations

    def _calculate_timing_variance(self, timing_results: List[TimingAnalysisResult]) -> float:
        """Calculate timing variance for a channel"""
        deltas = [r.time_delta for r in timing_results if r.time_delta is not None]
        return statistics.variance(deltas) if len(deltas) > 1 else 0.0

    def _assess_sync_quality(self, timing_results: List[TimingAnalysisResult]) -> str:
        """Assess synchronization quality for a channel"""
        if not timing_results:
            return "no_data"

        avg_confidence = statistics.mean([r.confidence_score for r in timing_results])

        if avg_confidence > 0.9:
            return "excellent"
        elif avg_confidence > 0.7:
            return "good"
        elif avg_confidence > 0.5:
            return "moderate"
        else:
            return "poor"

    def _calculate_relative_drift(self, timing_results: List[TimingAnalysisResult]) -> float:
        """Calculate relative drift compared to reference"""
        # Simplified calculation - would compare against a reference channel
        deltas = [r.time_delta for r in timing_results if r.time_delta is not None]
        return statistics.stdev(deltas) if len(deltas) > 1 else 0.0

    def _calculate_channel_correlations(self, channel_analysis: Dict) -> Dict[str, str]:
        """Calculate correlations between channels"""
        # Placeholder for cross-channel correlation analysis
        return {"correlation_analysis": "not_implemented_in_demo"}

    def _assess_overall_sync_status(self, channel_analysis: Dict) -> str:
        """Assess overall synchronization status across channels"""
        sync_qualities = [info.get('sync_quality', 'poor') for info in channel_analysis.values()]

        excellent_count = sync_qualities.count('excellent')
        good_count = sync_qualities.count('good')
        total_count = len(sync_qualities)

        if total_count == 0:
            return "no_data"
        elif excellent_count / total_count > 0.8:
            return "excellent"
        elif (excellent_count + good_count) / total_count > 0.6:
            return "good"
        else:
            return "poor"

    def _assess_reference_quality(self) -> str:
        """Assess the quality of the timing reference"""
        # Placeholder for reference quality assessment
        return "analysis_pending"