// Mountain/IPC/Enhanced/MessageCompressor/Compressor.rs
#![allow(non_snake_case)]
2
3use std::{
10 collections::VecDeque,
11 io::{Read, Write},
12};
13
14use bincode::serde::{decode_from_slice, encode_to_vec};
15use brotli::{CompressorReader, CompressorWriter, enc::BrotliEncoderParams};
16use flate2::{
17 Compression,
18 write::{GzEncoder, ZlibEncoder},
19};
20use tokio::time::Instant;
21
22use crate::IPC::Enhanced::MessageCompressor::{
23 BatchConfig::Struct as BatchConfig,
24 BatchStats::Struct as BatchStats,
25 CompressedBatch::Struct as CompressedBatch,
26 CompressionAlgorithm::Enum as CompressionAlgorithm,
27 CompressionInfo::Struct as CompressionInfo,
28 CompressionLevel::Enum as CompressionLevel,
29};
30
31pub struct Struct {
32 pub(super) Config:BatchConfig,
33 pub(super) CurrentBatch:VecDeque<Vec<u8>>,
34 pub(super) BatchStartTime:Option<Instant>,
35 pub(super) BatchSizeBytes:usize,
36}
37
38impl Struct {
39 pub fn new(config:BatchConfig) -> Self {
40 Self {
41 Config:config,
42 CurrentBatch:VecDeque::new(),
43 BatchStartTime:None,
44 BatchSizeBytes:0,
45 }
46 }
47
48 pub fn add_message(&mut self, MessageData:&[u8]) -> bool {
49 let MessageSize = MessageData.len();
50 let _should_compress = MessageSize >= self.Config.CompressionThresholdBytes;
51
52 if self.BatchSizeBytes + MessageSize > self.Config.MaxBatchSize * 1024 {
53 return false;
54 }
55
56 self.CurrentBatch.push_back(MessageData.to_vec());
57 self.BatchSizeBytes += MessageSize;
58
59 if self.BatchStartTime.is_none() {
60 self.BatchStartTime = Some(Instant::now());
61 }
62
63 true
64 }
65
66 pub fn should_flush(&self) -> bool {
67 if self.CurrentBatch.is_empty() {
68 return false;
69 }
70
71 if self.CurrentBatch.len() >= self.Config.MaxBatchSize {
72 return true;
73 }
74
75 if let Some(start_time) = self.BatchStartTime {
76 let elapsed = start_time.elapsed();
77 if elapsed.as_millis() >= self.Config.MaxBatchDelayMs as u128 {
78 return true;
79 }
80 }
81
82 false
83 }
84
85 pub fn flush_batch(&mut self) -> Result<CompressedBatch, String> {
86 if self.CurrentBatch.is_empty() {
87 return Err("No messages in batch to flush".to_string());
88 }
89
90 let BatchMessages:Vec<Vec<u8>> = self.CurrentBatch.drain(..).collect();
91 let total_size = self.BatchSizeBytes;
92
93 self.BatchStartTime = None;
94 self.BatchSizeBytes = 0;
95
96 let config = bincode::config::standard();
97 let serialized_batch =
98 encode_to_vec(&BatchMessages, config).map_err(|e| format!("Failed to serialize batch: {}", e))?;
99
100 let (CompressedData, compression_info) = if total_size >= self.Config.CompressionThresholdBytes {
101 self.compress_data(&serialized_batch).map(|(data, info)| (Some(data), info))
102 } else {
103 Ok((None, CompressionInfo::none()))
104 }?;
105
106 Ok(CompressedBatch {
107 messages_count:BatchMessages.len(),
108 original_size:total_size,
109 compressed_size:CompressedData.as_ref().map(|d| d.len()).unwrap_or(total_size),
110 compressed_data:CompressedData,
111 compression_info,
112 timestamp:std::time::SystemTime::now()
113 .duration_since(std::time::UNIX_EPOCH)
114 .unwrap_or_default()
115 .as_millis() as u64,
116 })
117 }
118
119 fn compress_data(&self, data:&[u8]) -> Result<(Vec<u8>, CompressionInfo), String> {
120 match self.Config.Algorithm {
121 CompressionAlgorithm::Brotli => self.compress_brotli(data),
122 CompressionAlgorithm::Gzip => self.compress_gzip(data),
123 CompressionAlgorithm::Zlib => self.compress_zlib(data),
124 }
125 }
126
127 fn compress_brotli(&self, data:&[u8]) -> Result<(Vec<u8>, CompressionInfo), String> {
128 let mut params = BrotliEncoderParams::default();
129 params.quality = self.Config.CompressionLevel as i32;
130
131 let mut compressed = Vec::new();
132 {
133 let mut writer = CompressorWriter::with_params(&mut compressed, data.len().try_into().unwrap(), ¶ms);
134 std::io::Write::write_all(&mut writer, data).map_err(|e| format!("Brotli compression failed: {}", e))?;
135 writer.flush().map_err(|e| format!("Brotli flush failed: {}", e))?;
136 }
137
138 let ratio = data.len() as f64 / compressed.len() as f64;
139
140 Ok((
141 compressed,
142 CompressionInfo { algorithm:"brotli".to_string(), level:self.Config.CompressionLevel as u32, ratio },
143 ))
144 }
145
146 fn compress_gzip(&self, data:&[u8]) -> Result<(Vec<u8>, CompressionInfo), String> {
147 let mut encoder = GzEncoder::new(Vec::new(), Compression::new(self.Config.CompressionLevel as u32));
148 encoder.write_all(data).map_err(|e| format!("Gzip compression failed: {}", e))?;
149
150 let compressed = encoder.finish().map_err(|e| format!("Gzip finish failed: {}", e))?;
151
152 let ratio = data.len() as f64 / compressed.len() as f64;
153
154 Ok((
155 compressed,
156 CompressionInfo { algorithm:"gzip".to_string(), level:self.Config.CompressionLevel as u32, ratio },
157 ))
158 }
159
160 fn compress_zlib(&self, data:&[u8]) -> Result<(Vec<u8>, CompressionInfo), String> {
161 let mut encoder = ZlibEncoder::new(Vec::new(), Compression::new(self.Config.CompressionLevel as u32));
162 encoder.write_all(data).map_err(|e| format!("Zlib compression failed: {}", e))?;
163
164 let compressed = encoder.finish().map_err(|e| format!("Zlib finish failed: {}", e))?;
165
166 let ratio = data.len() as f64 / compressed.len() as f64;
167
168 Ok((
169 compressed,
170 CompressionInfo { algorithm:"zlib".to_string(), level:self.Config.CompressionLevel as u32, ratio },
171 ))
172 }
173
174 pub fn decompress_batch(&self, batch:&CompressedBatch) -> Result<Vec<Vec<u8>>, String> {
175 let data = if let Some(ref compressed_data) = batch.compressed_data {
176 self.decompress_data(compressed_data, &batch.compression_info.algorithm)?
177 } else {
178 encode_to_vec(&batch, bincode::config::standard()).map_err(|e| format!("Serialization failed: {}", e))?
179 };
180
181 let (decoded, _) = decode_from_slice::<Vec<Vec<u8>>, _>(&data, bincode::config::standard())
182 .map_err(|e| format!("Failed to deserialize batch: {}", e))?;
183 Ok(decoded)
184 }
185
186 fn decompress_data(&self, data:&[u8], algorithm:&str) -> Result<Vec<u8>, String> {
187 match algorithm {
188 "brotli" => self.decompress_brotli(data),
189 "gzip" => self.decompress_gzip(data),
190 "zlib" => self.decompress_zlib(data),
191 _ => Err(format!("Unsupported compression algorithm: {}", algorithm)),
192 }
193 }
194
195 fn decompress_brotli(&self, data:&[u8]) -> Result<Vec<u8>, String> {
196 let mut decompressed = Vec::new();
197 let mut reader = CompressorReader::new(data, 0, data.len().try_into().unwrap(), data.len().try_into().unwrap());
198
199 std::io::Read::read_to_end(&mut reader, &mut decompressed)
200 .map_err(|e| format!("Brotli decompression failed: {}", e))?;
201
202 Ok(decompressed)
203 }
204
205 fn decompress_gzip(&self, data:&[u8]) -> Result<Vec<u8>, String> {
206 use flate2::read::GzDecoder;
207
208 let mut decoder = GzDecoder::new(data);
209 let mut decompressed = Vec::new();
210 decoder
211 .read_to_end(&mut decompressed)
212 .map_err(|e| format!("Gzip decompression failed: {}", e))?;
213
214 Ok(decompressed)
215 }
216
217 fn decompress_zlib(&self, data:&[u8]) -> Result<Vec<u8>, String> {
218 use flate2::read::ZlibDecoder;
219
220 let mut decoder = ZlibDecoder::new(data);
221 let mut decompressed = Vec::new();
222 decoder
223 .read_to_end(&mut decompressed)
224 .map_err(|e| format!("Zlib decompression failed: {}", e))?;
225
226 Ok(decompressed)
227 }
228
229 pub fn get_batch_stats(&self) -> BatchStats {
230 BatchStats {
231 messages_count:self.CurrentBatch.len(),
232 total_size_bytes:self.BatchSizeBytes,
233 batch_age_ms:self.BatchStartTime.map(|t| t.elapsed().as_millis() as u64).unwrap_or(0),
234 }
235 }
236
237 pub fn clear_batch(&mut self) {
238 self.CurrentBatch.clear();
239 self.BatchStartTime = None;
240 self.BatchSizeBytes = 0;
241 }
242
243 pub fn compress_single_message(
244 message_data:&[u8],
245 algorithm:CompressionAlgorithm,
246 level:CompressionLevel,
247 ) -> Result<(Vec<u8>, CompressionInfo), String> {
248 let config = BatchConfig { Algorithm:algorithm, CompressionLevel:level, ..Default::default() };
249
250 let compressor = Self::new(config);
251 compressor.compress_data(message_data)
252 }
253
254 pub fn calculate_compression_ratio(original_size:usize, compressed_size:usize) -> f64 {
255 if compressed_size == 0 {
256 return 0.0;
257 }
258 original_size as f64 / compressed_size as f64
259 }
260
261 pub fn estimate_savings(original_size:usize, expected_ratio:f64) -> usize {
262 (original_size as f64 * (1.0 - 1.0 / expected_ratio)) as usize
263 }
264}