pretty good
analyzer/protocols/decoders/custom_timing.py (new file, 279 lines)
@@ -0,0 +1,279 @@
"""
Custom timing decoder for proprietary Chapter 10 timing frames.
Handles the 0x72xx-0x78xx timing sequence found in ACTTS-like systems.
"""

import struct
from typing import Dict, Any, Optional, List

from .base import DataTypeDecoder, DecodedPayload


class CustomTimingDecoder(DataTypeDecoder):
    """Decoder for custom timing frames (0x7200-0x78FF)"""

    def __init__(self):
        super().__init__()
        self.data_type_base = 0x72
        self.data_type_name = "Custom Timing"
        # Register every variant in the 0x7200-0x78FF range
        self.supported_formats = [
            (base << 8) | variant
            for base in range(0x72, 0x79)
            for variant in range(0x00, 0x100)
        ]

    def can_decode(self, data_type: int) -> bool:
        # Accept any data type in the 0x7200-0x78FF range
        return 0x7200 <= data_type <= 0x78FF

    def get_data_type_name(self, data_type: int) -> str:
        base = (data_type >> 8) & 0xFF
        variant = data_type & 0xFF

        timing_types = {
            0x72: "Custom ACTTS Timing",
            0x73: "Extended Timing Format",
            0x74: "Sync Timing Format",
            0x75: "Clock Reference Format",
            0x76: "Time Correlation Format",
            0x77: "Timing Validation Format",
            0x78: "Multi-Source Timing"
        }

        base_name = timing_types.get(base, f"Timing Format 0x{base:02x}")
        return f"{base_name} (Variant 0x{variant:02x})"

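    # Illustrative outputs, derived from the name mapping above:
    #   get_data_type_name(0x7203) -> "Custom ACTTS Timing (Variant 0x03)"
    #   get_data_type_name(0x7410) -> "Sync Timing Format (Variant 0x10)"
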
    def decode(self, payload: bytes, ch10_header: Dict[str, Any]) -> Optional[DecodedPayload]:
        """Decode a custom timing payload"""
        data_type = ch10_header.get('data_type', 0)

        if not self.can_decode(data_type):
            return None

        decoded_data = {}
        errors = []

        # Parse the intra-packet header (IPH) if present
        iph = self._parse_intra_packet_header(payload)
        if iph:
            decoded_data.update(iph)
            data_start = iph['data_start']
        else:
            # For custom timing frames, a missing IPH may be normal
            data_start = 0

        # Analyze the timing data structure
        timing_analysis = self._analyze_timing_structure(payload, data_start, data_type)
        decoded_data.update(timing_analysis)

        # Extract Chapter 10 timing information from the packet header
        decoded_data['ch10_time_counter'] = ch10_header.get('relative_time_counter', 0)
        decoded_data['ch10_sequence'] = ch10_header.get('sequence_number', 0)
        decoded_data['ch10_channel'] = ch10_header.get('channel_id', 0)

        # Calculate timing metrics from any extracted samples
        if decoded_data.get('timing_samples'):
            decoded_data['timing_metrics'] = self._calculate_timing_metrics(
                decoded_data['timing_samples'])

        return DecodedPayload(
            data_type=data_type,
            data_type_name=self.get_data_type_name(data_type),
            format_version=(data_type >> 8) & 0x0F,
            decoded_data=decoded_data,
            raw_payload=payload,
            errors=errors,
            metadata={'decoder': 'CustomTimingDecoder', 'timing_type': 'proprietary'}
        )

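    # Illustrative shape of decoded_data for a 0x72xx frame, assembled from the
    # steps above (exact keys depend on the IPH and which analyzers fire):
    #   {'timing_data_length': ..., 'timing_samples': [...], 'sample_count': ...,
    #    'timing_format': 'ACTTS-style', 'ch10_time_counter': ...,
    #    'ch10_sequence': ..., 'ch10_channel': ..., 'timing_metrics': {...}}
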
    def _analyze_timing_structure(self, payload: bytes, data_start: int, data_type: int) -> Dict[str, Any]:
        """Analyze the structure of the timing data"""
        analysis = {}

        if data_start >= len(payload):
            return {'error': 'No timing data available'}

        timing_data = payload[data_start:]
        analysis['timing_data_length'] = len(timing_data)

        # Try both 32-bit and 64-bit interpretations of the timing words;
        # samples from each word size are collected into the same list
        timing_samples = []
        for word_size in [4, 8]:
            if len(timing_data) >= word_size:
                samples = self._extract_timing_words(timing_data, word_size)
                if samples:
                    timing_samples.extend(samples[:50])  # Limit to first 50 samples

        analysis['timing_samples'] = timing_samples
        analysis['sample_count'] = len(timing_samples)

        # Look for embedded timing markers
        timing_markers = self._find_timing_markers(timing_data)
        if timing_markers:
            analysis['timing_markers'] = timing_markers

        # Dispatch on the base data type to the format-specific analyzer
        base_type = (data_type >> 8) & 0xFF
        if base_type == 0x72:
            # ACTTS-style timing
            analysis.update(self._analyze_actts_timing(timing_data))
        elif base_type in (0x73, 0x74, 0x75, 0x76, 0x77):
            # Extended timing formats
            analysis.update(self._analyze_extended_timing(timing_data, base_type))
        elif base_type == 0x78:
            # Multi-source timing
            analysis.update(self._analyze_multi_source_timing(timing_data))

        return analysis

    def _extract_timing_words(self, data: bytes, word_size: int) -> List[int]:
        """Extract little-endian timing words from binary data"""
        words = []
        format_str = '<I' if word_size == 4 else '<Q'

        for i in range(0, len(data) - word_size + 1, word_size):
            try:
                word = struct.unpack(format_str, data[i:i + word_size])[0]
                words.append(word)
            except struct.error:
                break

            if len(words) >= 100:  # Limit extraction
                break

        return words

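    # Example, derived from the little-endian unpacking above:
    #   _extract_timing_words(b'\x01\x00\x00\x00\x02\x00\x00\x00', 4) -> [1, 2]
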
    def _find_timing_markers(self, data: bytes) -> List[Dict[str, Any]]:
        """Find timing synchronization markers in data"""
        markers = []

        # Candidate timing sync patterns (heuristic)
        sync_patterns = [
            b'\x81\x81\x81\x81',  # Potential sync pattern
            b'\x82\x82\x82\x82',  # Another potential pattern
            b'\xa9\xa9\xa9\xa9',  # Observed in sample captures
        ]

        for pattern in sync_patterns:
            offset = 0
            while len(markers) < 20:  # Limit total markers
                pos = data.find(pattern, offset)
                if pos == -1:
                    break

                markers.append({
                    'pattern': pattern.hex(),
                    'offset': pos,
                    'description': f'Sync pattern at offset {pos}'
                })

                offset = pos + 1

            if len(markers) >= 20:
                break

        return markers

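    # Example, derived from the scan above (one match at offset 1):
    #   _find_timing_markers(b'\x00\xa9\xa9\xa9\xa9')
    #   -> [{'pattern': 'a9a9a9a9', 'offset': 1,
    #        'description': 'Sync pattern at offset 1'}]
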
    def _analyze_actts_timing(self, data: bytes) -> Dict[str, Any]:
        """Analyze ACTTS-style timing data"""
        analysis = {'timing_format': 'ACTTS-style'}

        if len(data) >= 16:
            # Look for an ACTTS-like four-word header
            try:
                header = struct.unpack('<IIII', data[0:16])
                analysis['actts_header'] = {
                    'word1': f'0x{header[0]:08x}',
                    'word2': f'0x{header[1]:08x}',
                    'word3': f'0x{header[2]:08x}',
                    'word4': f'0x{header[3]:08x}'
                }

                # A delta of 1, 1000, or 1000000 counts between the first two
                # words suggests an incrementing timestamp
                if header[1] - header[0] in (1, 1000, 1000000):
                    analysis['timing_correlation'] = 'Incremental timing detected'

            except struct.error:
                analysis['actts_parse_error'] = 'Failed to parse ACTTS header'

        return analysis

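    # Example, derived from the header check above (delta of 1000 counts):
    #   _analyze_actts_timing(struct.pack('<IIII', 100, 1100, 0, 0))
    #   -> includes 'timing_correlation': 'Incremental timing detected'
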
    def _analyze_extended_timing(self, data: bytes, base_type: int) -> Dict[str, Any]:
        """Analyze extended timing formats (0x73-0x77)"""
        analysis = {'timing_format': f'Extended Format 0x{base_type:02x}'}

        # Look for a leading pair of 16-bit sequence counters
        if len(data) >= 8:
            try:
                seq_data = struct.unpack('<HH', data[0:4])
                analysis['sequence_info'] = {
                    'seq1': seq_data[0],
                    'seq2': seq_data[1],
                    'delta': seq_data[1] - seq_data[0]
                }
            except struct.error:
                pass

        return analysis

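    # Example, derived from the unpacking above (8-byte payload, counters 10 and 12):
    #   _analyze_extended_timing(struct.pack('<HHI', 10, 12, 0), 0x73)
    #   -> {'timing_format': 'Extended Format 0x73',
    #       'sequence_info': {'seq1': 10, 'seq2': 12, 'delta': 2}}
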
    def _analyze_multi_source_timing(self, data: bytes) -> Dict[str, Any]:
        """Analyze multi-source timing data (0x78)"""
        analysis = {'timing_format': 'Multi-source timing'}

        # Walk the payload as a sequence of 8-byte (source, timestamp) records
        sources = []
        offset = 0

        while offset + 8 <= len(data):
            try:
                source_data = struct.unpack('<II', data[offset:offset + 8])
                sources.append({
                    'source_id': source_data[0] & 0xFF,
                    'timestamp': source_data[1],
                    'offset': offset
                })
                offset += 8
            except struct.error:
                break

            if len(sources) >= 10:  # Limit sources
                break

        analysis['timing_sources'] = sources
        analysis['source_count'] = len(sources)

        return analysis

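    # Example, derived from the record layout above (source 5, timestamp 1000):
    #   _analyze_multi_source_timing(struct.pack('<II', 5, 1000))
    #   -> includes 'source_count': 1 and
    #      'timing_sources': [{'source_id': 5, 'timestamp': 1000, 'offset': 0}]
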
    def _calculate_timing_metrics(self, samples: List[int]) -> Dict[str, Any]:
        """Calculate timing statistics from samples"""
        if len(samples) < 2:
            return {}

        # Deltas between consecutive samples
        deltas = [samples[i + 1] - samples[i] for i in range(len(samples) - 1)]

        # Basic statistics
        metrics = {
            'sample_count': len(samples),
            'min_value': min(samples),
            'max_value': max(samples),
            'range': max(samples) - min(samples),
            'first_sample': samples[0],
            'last_sample': samples[-1]
        }

        if deltas:
            metrics.update({
                'min_delta': min(deltas),
                'max_delta': max(deltas),
                'avg_delta': sum(deltas) / len(deltas),
                'zero_deltas': deltas.count(0),
                'constant_rate': len(set(deltas)) == 1
            })

        return metrics
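

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the decoder itself). It
# assumes DataTypeDecoder.__init__ takes no arguments, as the constructor
# above implies, and that the package is importable, e.g. run as:
#   python -m analyzer.protocols.decoders.custom_timing
# The payload is synthetic; the private helpers are called directly here only
# to demonstrate their behavior.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    decoder = CustomTimingDecoder()

    # Four incrementing 32-bit counts as a stand-in 0x7200 payload
    payload = struct.pack('<IIII', 1000, 2000, 3000, 4000)

    print(decoder.can_decode(0x7200))          # True
    print(decoder.get_data_type_name(0x7200))  # Custom ACTTS Timing (Variant 0x00)

    analysis = decoder._analyze_timing_structure(payload, 0, 0x7200)
    print(analysis['timing_format'])           # ACTTS-style
    print(analysis['sample_count'])            # 6 (four 32-bit + two 64-bit reads)

    metrics = decoder._calculate_timing_metrics([1000, 2000, 3000, 4000])
    print(metrics['avg_delta'], metrics['constant_rate'])  # 1000.0 True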