commit b1f730630ab0d86b0304737cdb45a7d2a15fe413
Author: sigil-03
Date:   Wed Dec 10 16:11:23 2025 -0700

    init repo + add edge-telemetry puzzle

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..caaa22a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+# PUZZLES
+assortment of puzzles which i've completed in order to practice my programming skills.
+
+# AI DISCLAIMER
+each puzzle uses LLM generated challenges which are copied by me into a `challenge.md` file.
+
+**this is the _only_ AI generated content in this repository.**
diff --git a/edge-telemetry/.gitignore b/edge-telemetry/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/edge-telemetry/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/edge-telemetry/Cargo.lock b/edge-telemetry/Cargo.lock
new file mode 100644
index 0000000..9a298d3
--- /dev/null
+++ b/edge-telemetry/Cargo.lock
@@ -0,0 +1,7 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "edge-telemetry"
+version = "0.1.0"
diff --git a/edge-telemetry/Cargo.toml b/edge-telemetry/Cargo.toml
new file mode 100644
index 0000000..8addf5d
--- /dev/null
+++ b/edge-telemetry/Cargo.toml
@@ -0,0 +1,6 @@
+[package]
+name = "edge-telemetry"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
diff --git a/edge-telemetry/README.md b/edge-telemetry/README.md
new file mode 100644
index 0000000..6e3ff50
--- /dev/null
+++ b/edge-telemetry/README.md
@@ -0,0 +1,8 @@
+# EDGE TELEMETRY
+this is a toy problem used as rust practice. the challenge text is located in `challenge.md`.
+
+
+# AI DISCLAIMER
+this repository contains a challenge which was generated by chatGPT. this content exists in: `challenge.md`.
+
+this is the _only_ LLM / AI generated content in this repository; everything else was done by hand using my brain.
diff --git a/edge-telemetry/challenge.md b/edge-telemetry/challenge.md
new file mode 100644
index 0000000..00ef9ac
--- /dev/null
+++ b/edge-telemetry/challenge.md
@@ -0,0 +1,24 @@
+**THIS CHALLENGE GENERATED BY CHATGPT**
+
+# Edge Telemetry
+## Problem Overview:
+
+You are developing software for an edge device that collects power usage data from sensors at periodic intervals.
+The device has limited memory, so you need to design a system that efficiently stores recent data, compresses it,
+and manages memory effectively. The goal is to minimize the amount of memory used while ensuring that the telemetry
+data is still accessible and compressed.
+
+## Requirements:
+**Data Structure:** You will be given a list of telemetry readings, each consisting of a timestamp and power usage value. You need to store the readings in a compressed form, using Run-Length Encoding (RLE).
+
+**Memory Management:** The device has a limited memory capacity of N readings. If the number of readings exceeds this limit, the oldest readings should be discarded to make room for the new data.
+
+**Compression:** Use Run-Length Encoding (RLE) to compress consecutive identical readings. This will reduce the amount of memory needed for storing repeated values.
+
+## API:
+
+`add_reading(timestamp, power_usage)`: Adds a new telemetry reading and compresses the data using RLE.
+
+`get_compressed_data()`: Returns the compressed data as a list of tuples, where each tuple contains the power usage value and the count of consecutive occurrences.
+
+**END LLM GENERATED CONTENT**
diff --git a/edge-telemetry/src/main.rs b/edge-telemetry/src/main.rs
new file mode 100644
index 0000000..97206df
--- /dev/null
+++ b/edge-telemetry/src/main.rs
@@ -0,0 +1,157 @@
+pub trait EdgeTelemetry<E> {
+    /// Add a new telemetry reading to the underlying storage type
+    // TODO: update the timestamp to something that makes more sense maybe?
+    fn add_reading(&mut self, entry: E);
+
+    /// Returns the compressed data as a collection of tuples, where each tuple has the format:
+    /// `(entry, consecutive occurrences)`
+    fn get_compressed_data<'a>(&'a self) -> impl Iterator<Item = &'a (E, usize)>
+    where
+        E: 'a;
+}
+
+mod entry {
+    #[derive(Clone)]
+    pub struct Entry {
+        timestamp: usize,
+        reading: usize,
+    }
+
+    impl Entry {
+        pub fn new(timestamp: usize, reading: usize) -> Self {
+            Self { timestamp, reading }
+        }
+        pub fn timestamp(&self) -> usize {
+            self.timestamp
+        }
+        pub fn reading(&self) -> usize {
+            self.reading
+        }
+    }
+
+    #[cfg(test)]
+    mod test {
+        use super::*;
+
+        #[test]
+        fn create_entry() {
+            let t = 3;
+            let r = 1;
+
+            let e = Entry::new(t, r);
+
+            assert_eq!(t, e.timestamp());
+            assert_eq!(r, e.reading());
+        }
+    }
+}
+
+mod storage {
+    use std::collections::VecDeque;
+
+    use crate::EdgeTelemetry;
+    use crate::entry::Entry;
+
+    pub struct RingBufferStorage {
+        size: usize,
+        buf: VecDeque<(Entry, usize)>,
+    }
+
+    impl RingBufferStorage {
+        pub fn new(size: usize) -> Self {
+            Self {
+                size,
+                buf: VecDeque::with_capacity(size),
+            }
+        }
+
+        pub fn add_entry(&mut self, entry: Entry) {
+            // buffer is not empty, and the previous value matches
+            if let Some((prev, quantity)) = self.buf.iter_mut().last()
+                && prev.reading() == entry.reading()
+            {
+                // TODO: add some logic here to overflow into a new entry if needed
+                *quantity = quantity.saturating_add(1);
+            }
+            // buffer is either empty, or the previous value does not match
+            else {
+                // check capacity to make sure we aren't full (not really necessary on the empty case but this is a little easier to read)
+                if self.buf.len() == self.size {
+                    self.buf.pop_front();
+                }
+                self.buf.push_back((entry, 1));
+            }
+        }
+    }
+
+    impl EdgeTelemetry<Entry> for RingBufferStorage {
+        fn add_reading(&mut self, entry: Entry) {
+            self.add_entry(entry);
+        }
+
+        fn get_compressed_data<'a>(&'a self) -> impl Iterator<Item = &'a (Entry, usize)>
+        where
+            Entry: 'a,
+        {
+            self.buf.iter()
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use crate::EdgeTelemetry;
+    use crate::entry::Entry;
+    use crate::storage::RingBufferStorage;
+
+    #[test]
+    fn add_and_get_reading() {
+        let size = 10;
+        let mut s = RingBufferStorage::new(size);
+
+        let t = 1;
+        let r = 3;
+        let e = Entry::new(t, r);
+
+        s.add_reading(e);
+
+        let data = s.get_compressed_data();
+
+        data.into_iter().for_each(|(entry, quantity)| {
+            assert_eq!(entry.timestamp(), t);
+            assert_eq!(entry.reading(), r);
+            assert_eq!(*quantity, 1);
+        });
+    }
+
+    #[test]
+    fn duplicate_readings() {
+        let size = 10;
+        let mut s = RingBufferStorage::new(size);
+
+        let t = 1;
+        let r = 3;
+        let q = 3;
+
+        for _ in 0..q {
+            let e = Entry::new(t, r);
+            s.add_reading(e);
+        }
+
+        let data = s.get_compressed_data();
+
+        data.into_iter().for_each(|(entry, quantity)| {
+            assert_eq!(entry.timestamp(), t);
+            assert_eq!(entry.reading(), r);
+            assert_eq!(*quantity, q);
+        });
+    }
+}
+
+fn main() {
+    println!("Hello, world!");
+}
+
+// NOTES
+// * assuming the sample rate is constant, we can extract sample time from the run length by knowing the base stamp, and incrementing to the offset
+// * this also means we can actually just stamp the start time and then compute the sample time via the offset
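
The closing NOTES sketch how per-sample timestamps could be recovered from a run's start stamp when the sample rate is constant. A minimal sketch of that idea, not part of the commit itself: `SAMPLE_INTERVAL` and `expand_run` are hypothetical names assumed here purely for illustration.

```rust
// Assumed fixed spacing between samples (hypothetical constant).
const SAMPLE_INTERVAL: usize = 1;

/// Expand one RLE run `(start_timestamp, reading, count)` back into
/// individual `(timestamp, reading)` samples using only the run's start
/// stamp and its length.
fn expand_run(start_timestamp: usize, reading: usize, count: usize) -> Vec<(usize, usize)> {
    (0..count)
        .map(|offset| (start_timestamp + offset * SAMPLE_INTERVAL, reading))
        .collect()
}

fn main() {
    // A run of three identical readings stamped at t = 10 expands to t = 10, 11, 12.
    assert_eq!(expand_run(10, 3, 3), vec![(10, 3), (11, 3), (12, 3)]);
}
```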