From 6403cad1918002b4988c150dba74934977c31d23 Mon Sep 17 00:00:00 2001 From: CiscoTheWolf Date: Mon, 2 Oct 2023 08:05:48 +0200 Subject: [PATCH] Initial commit of yaml parse test. A complete new controll system. Still some bugs remain --- yaml parse test/.vscode/c_cpp_properties.json | 21 + yaml parse test/Dockerfile | 4 + yaml parse test/Point2D.py | 272 +++++++++++++ yaml parse test/README.md | 5 + .../__pycache__/Point2D.cpython-311.pyc | Bin 0 -> 17306 bytes yaml parse test/animation1_frame1.png | Bin 0 -> 306 bytes yaml parse test/animation1_frame2.png | Bin 0 -> 306 bytes yaml parse test/cache/point_array_cache.json | 1 + yaml parse test/devideIntoGroups.c | 67 ++++ yaml parse test/dizzyFace.png | Bin 0 -> 1023 bytes yaml parse test/eyesClosed_neutral.png | Bin 0 -> 277 bytes yaml parse test/fastproot/Cargo.lock | 317 +++++++++++++++ yaml parse test/fastproot/Cargo.toml | 18 + yaml parse test/fastproot/src/lib.rs | 131 +++++++ yaml parse test/graphics/loaded.png | Bin 0 -> 1394 bytes yaml parse test/neutral.png | Bin 0 -> 306 bytes yaml parse test/prootOS.py | 366 ++++++++++++++++++ yaml parse test/shared.so | Bin 0 -> 27357 bytes yaml parse test/test.py | 9 + yaml parse test/testAnimationYaml.yaml | 83 ++++ 20 files changed, 1294 insertions(+) create mode 100644 yaml parse test/.vscode/c_cpp_properties.json create mode 100644 yaml parse test/Dockerfile create mode 100644 yaml parse test/Point2D.py create mode 100644 yaml parse test/README.md create mode 100644 yaml parse test/__pycache__/Point2D.cpython-311.pyc create mode 100644 yaml parse test/animation1_frame1.png create mode 100644 yaml parse test/animation1_frame2.png create mode 100644 yaml parse test/cache/point_array_cache.json create mode 100644 yaml parse test/devideIntoGroups.c create mode 100644 yaml parse test/dizzyFace.png create mode 100644 yaml parse test/eyesClosed_neutral.png create mode 100644 yaml parse test/fastproot/Cargo.lock create mode 100644 yaml parse test/fastproot/Cargo.toml 
create mode 100644 yaml parse test/fastproot/src/lib.rs create mode 100644 yaml parse test/graphics/loaded.png create mode 100644 yaml parse test/neutral.png create mode 100644 yaml parse test/prootOS.py create mode 100644 yaml parse test/shared.so create mode 100644 yaml parse test/test.py create mode 100644 yaml parse test/testAnimationYaml.yaml diff --git a/yaml parse test/.vscode/c_cpp_properties.json b/yaml parse test/.vscode/c_cpp_properties.json new file mode 100644 index 0000000..c78c930 --- /dev/null +++ b/yaml parse test/.vscode/c_cpp_properties.json @@ -0,0 +1,21 @@ +{ + "configurations": [ + { + "name": "Win32", + "includePath": [ + "${workspaceFolder}/**" + ], + "defines": [ + "_DEBUG", + "UNICODE", + "_UNICODE" + ], + "windowsSdkVersion": "10.0.22000.0", + "compilerPath": "cl.exe", + "cStandard": "c17", + "cppStandard": "c++17", + "intelliSenseMode": "windows-msvc-x64" + } + ], + "version": 4 +} \ No newline at end of file diff --git a/yaml parse test/Dockerfile b/yaml parse test/Dockerfile new file mode 100644 index 0000000..633ec9a --- /dev/null +++ b/yaml parse test/Dockerfile @@ -0,0 +1,4 @@ +FROM ghcr.io/cross-rs/armv7-unknown-linux-gnueabihf:main +RUN dpkg --add-architecture armhf +RUN apt-get update +RUN DEBIAN_FRONTEND=noninteractive apt-get install -yq python3.9:armhf python3.9-dev:armhf libpython3.9-dev:armhf \ No newline at end of file diff --git a/yaml parse test/Point2D.py b/yaml parse test/Point2D.py new file mode 100644 index 0000000..980eb95 --- /dev/null +++ b/yaml parse test/Point2D.py @@ -0,0 +1,272 @@ +import hashlib +import json +import os.path +import math +from scipy.optimize import linear_sum_assignment +import numpy as np +from PIL import Image +import fastproot + + +# location to store array cache +CACHE_FILE_PATH = "./cache//point_array_cache.json" +LOADED_IMAGE_PATH = "./graphics/loaded.png" + +CACHE_FILE_PATH = os.path.normpath(CACHE_FILE_PATH) +LOADED_IMAGE_PATH = os.path.normpath(LOADED_IMAGE_PATH) + +class Point2D: + x 
= 0 + y = 0 + color: tuple[int, int, int] = (0, 0, 0) + + def __init__(self, x, y, color: tuple[int, int, int] = (0, 0, 0)): + self.x = x + self.y = y + self.color = color + + def round(self): + self.x = round(self.x) + self.y = round(self.y) + return self + + def distance(self, other): + dx = self.x - other.x + dy = self.y - other.y + return math.sqrt(dx ** 2 + dy ** 2) + + def interpolate(self, other, percentage): + new_x = self.x + (other.x - self.x) * percentage + new_y = self.y + (other.y - self.y) * percentage + new_color = tuple(int((1 - percentage) * self.color[i] + percentage * other.color[i]) for i in range(3)) + return Point2D(new_x, new_y, new_color) + + def __eq__(self, other): + return (self.x, self.y) == (other.x, other.y) + + +def divide_points_into_groups(points): + result = fastproot.divide_into_groups(points) + res = [[Point2D(p[0], p[1], p[2]) for p in arr] for arr in result] + return res + +def _divide_points_into_groups(points): + def flood_fill(point, group): + if point not in group: + group.append(point) + + neighbors = [(point.x + 1, point.y), (point.x - 1, point.y), (point.x, point.y + 1), (point.x, point.y - 1)] + for neighbor_coords in neighbors: + neighbor = next((p for p in points if p.x == neighbor_coords[0] and p.y == neighbor_coords[1]), None) + if neighbor and neighbor not in group: + flood_fill(neighbor, group) + + groups = [] + remaining_points = [point for point in points if point.x < 64] # Filter points with x < 64 + + while remaining_points: + group = [] + flood_fill(remaining_points[0], group) + groups.append(group) + + # Remove points in the group from the remaining points + remaining_points = [point for point in remaining_points if point not in group] + + return groups + + +def compute_bounding_box(points: list[Point2D]) -> tuple[float, float, float, float]: + min_x = min(point.x for point in points) + max_x = max(point.x for point in points) + min_y = min(point.y for point in points) + max_y = max(point.y for point in 
points) + return min_x, max_x, min_y, max_y + +def calculate_distance(points1: list[Point2D], points2: list[Point2D]) -> float: + # Calculate the distance between the centers of two groups + center1_x = (points1[0].x + points1[-1].x) / 2 + center1_y = (points1[0].y + points1[-1].y) / 2 + center2_x = (points2[0].x + points2[-1].x) / 2 + center2_y = (points2[0].y + points2[-1].y) / 2 + return ((center1_x - center2_x) ** 2 + (center1_y - center2_y) ** 2) ** 0.5 + +def pair_groups(set_a: list[list[Point2D]], set_b: list[list[Point2D]]) -> list[tuple[list[Point2D], list[Point2D]]]: + pairs = [] + + # Create dictionaries to store bounding boxes for each group + bounding_boxes_a: dict[int, tuple[float, float, float, float]] = {} + bounding_boxes_b: dict[int, tuple[float, float, float, float]] = {} + + # Calculate bounding boxes for all groups in both sets + for i, group in enumerate(set_a + set_b): + bounding_box = compute_bounding_box(group) + if i < len(set_a): + bounding_boxes_a[i] = bounding_box + else: + bounding_boxes_b[i - len(set_a)] = bounding_box + + # Check for overlaps and determine pairs + for i, group_a in enumerate(set_a): + overlap_detected = False + for j, group_b in enumerate(set_b): + bounding_box_a = bounding_boxes_a[i] + bounding_box_b = bounding_boxes_b[j] + if ( + bounding_box_a[0] <= bounding_box_b[1] and + bounding_box_a[1] >= bounding_box_b[0] and + bounding_box_a[2] <= bounding_box_b[3] and + bounding_box_a[3] >= bounding_box_b[2] + ): + pairs.append((group_a, group_b)) + overlap_detected = True + break + + if not overlap_detected: + # Find the nearest neighbor in set B + nearest_group = min(set_b, key=lambda group: calculate_distance(group_a, group)) + pairs.append((group_a, nearest_group)) + + return pairs + +def mirror_points(points: list[Point2D]) -> list[Point2D]: + mirrored_points = [] + for point in points: + mirrored_x = 128 - point.x # Calculate the mirrored x-coordinate + mirrored_point = Point2D(mirrored_x, point.y, point.color) + 
mirrored_points.append(mirrored_point) + return mirrored_points + + +def get_image_hash(image): + image_hash = hashlib.sha1(image.tobytes()).hexdigest() + return image_hash + + +def load_cached_point_arrays(): + cached_point_arrays = {} + + if os.path.isfile(CACHE_FILE_PATH): + with open(CACHE_FILE_PATH, "r") as file: + cached_point_arrays = json.load(file) + + return cached_point_arrays + + +def save_cached_point_arrays(cached_point_arrays): + if not os.path.isfile(CACHE_FILE_PATH): + open(CACHE_FILE_PATH, "x").close() + + with open(CACHE_FILE_PATH, "w") as file: + json.dump(cached_point_arrays, file) + + +def generate_point_array_from_image(image): + image.load() + image = image.convert("RGB") # Convert image to RGB color mode + image_hash = get_image_hash(image) + cached_point_arrays = load_cached_point_arrays() + + if image_hash in cached_point_arrays: + print("Found existing point array matching png. Using existing array.") + return [Point2D(point["x"], point["y"], tuple(point["color"])) for point in cached_point_arrays[image_hash]] + + print("No existing point array matching png found. Generating now.") + width, height = image.size + pixel_array = [] + + image.save(LOADED_IMAGE_PATH) + + for y in range(height): + for x in range(width): + pixel = image.getpixel((x, y)) + if sum(pixel) > 15: # any pixel which total color value is greater than 15. 
+ point = {"x": x, "y": y, "color": pixel} + pixel_array.append(point) + + cached_point_arrays[image_hash] = pixel_array + save_cached_point_arrays(cached_point_arrays) + + print("Point array generated and stored.") + return [Point2D(point["x"], point["y"], tuple(point["color"])) for point in pixel_array] + + +def generate_image_from_point_array(points: list[Point2D], width: int, height: int) -> Image: + # Create a new blank image + image = Image.new("RGB", (width, height), "black") + + # Set the pixels corresponding to the points as white + pixels = image.load() + for point in points: + point = point.round() + x = point.x + y = point.y + pixels[x, y] = point.color + + return image + + +def interpolate_point_pairs(pairs: list[tuple[Point2D, Point2D]], percentage: float) -> list[Point2D]: + interpolated_points:list[Point2D] = [] + for pair in pairs: + point1, point2 = pair + interpolated_point = point1.interpolate(point2, percentage) + interpolated_points.append(interpolated_point) + return interpolated_points + + +def pair_points(points1: list[Point2D], points2: list[Point2D]) -> list[tuple[Point2D, Point2D]]: + # Update the size of the point arrays + size1 = len(points1) + size2 = len(points2) + + # Duplicate points in the smaller array to match the size of the larger array + if size1 > size2: + num_duplicates = size1 - size2 + duplicated_points = np.random.choice(points2, size=num_duplicates).tolist() + points2 += duplicated_points + elif size2 > size1: + num_duplicates = size2 - size1 + duplicated_points = np.random.choice(points1, size=num_duplicates).tolist() + points1 += duplicated_points + + row_ind, col_ind = fastproot.solve(points1, points2) + + # Create pairs of points based on the optimal assignment + pairs = [] + for i, j in zip(row_ind, col_ind): + pairs.append((points1[i], points2[j])) + + return pairs + + + +def _pair_points(points1: list[Point2D], points2: list[Point2D]) -> list[tuple[Point2D, Point2D]]: + # Update the size of the point arrays + 
size1 = len(points1) + size2 = len(points2) + + # Duplicate points in the smaller array to match the size of the larger array + if size1 > size2: + num_duplicates = size1 - size2 + duplicated_points = np.random.choice(points2, size=num_duplicates).tolist() + points2 += duplicated_points + elif size2 > size1: + num_duplicates = size2 - size1 + duplicated_points = np.random.choice(points1, size=num_duplicates).tolist() + points1 += duplicated_points + + # Create a new cost matrix with the updated sizes + cost_matrix = np.zeros((size1, size2)) + for i in range(size1): + for j in range(size2): + cost_matrix[i, j] = points1[i].distance(points2[j]) + + # Solve the assignment problem using the Hungarian algorithm + row_ind, col_ind = linear_sum_assignment(cost_matrix) + + # Create pairs of points based on the optimal assignment + pairs = [] + for i, j in zip(row_ind, col_ind): + pairs.append((points1[i], points2[j])) + + return pairs \ No newline at end of file diff --git a/yaml parse test/README.md b/yaml parse test/README.md new file mode 100644 index 0000000..6c70535 --- /dev/null +++ b/yaml parse test/README.md @@ -0,0 +1,5 @@ +Crosscompile instructions: +* Build docker image with python armv7 installed: + `docker build -t my-image .` +* Use cross to crosscompile: + `cross.exe build --target=armv7-unknown-linux-gnueabihf --release` \ No newline at end of file diff --git a/yaml parse test/__pycache__/Point2D.cpython-311.pyc b/yaml parse test/__pycache__/Point2D.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ad6204e90aa1590134da37ab0651b418a304547 GIT binary patch literal 17306 zcmeHud2kz7nqN2202%~9fR`vzqy!z5L>;6^9hN<^Y)Yai$-2C{Y>F}j5lxB`Nl@LO zC^6s+$E7WU3caQsI)b;f31&;{rN@&DlW40{Ti!%7yPGIglS;R$Cqi|Vs+3AOwf>J= znyqsApXB$w#?cK@j%UWH>>p`#fBlZ{y?)2{zUw{wc_2{6;riyk*GYf0p5y)pMe-NR zLFD-}2glvvVqAS8s3^|57uD`K^P z4U5Vzml|VrczjY>pBSr0*_E*stc^h_xO7`$4N^7m?ca6B8c}xDcNjzJ{|oCzKUboj 
z@b^&9D%7(&#-HMv!(R~%9B<~0n!$l#DJn;l(V<9GQ3m>lhos>IGF*p;qW#kNs+QK? zXzyjIwRI#uFr0`)WjT5?!jf99DDmO(nwHjnIXZHApjT-fj7MWqtYu`lzqdeR2)J{R z=OYN*;UtdMQ@7&`))-b4kJaSpc1do@L%%*}agHT1=G=oav9g$Rsr)`iOt|Ih_QqV8 z$ugpUtU3>%pHY1v=0#t9hUX+Tz5Rep%i=R9dliA<>WvS^;oro3ITZ}u!ea0XE%xUm94F0ApqDZq~2#Pe=C;JhRi5;+Hg+3U@ z_08M+U+FocNV3xN2l1Od$M<*1(#^j3jh+LNay1bj>A4)gbm?Z#h#bd24-E|r_xJ1{ zPNuF4}S5^Q_ za@mT?lrZhiziR3cD@)<`9NAoNp|6P0sIZZ_g1{XvVaZE!R|+vZf3l8ONr}S9EhLyQe5iRf^zRHTP|N*INe9{_+J0u|F|XQbJFEzp>9*kezFRns@qmFa7r z)Mnb}Dt_{kR<}tDZq|dFRdKU9bcJrAGFtm==>8`H?{V+(?>X*pQkc6XiT^TQc;q?RcJ3M?QAG!SRfSF6^A(t&1VXO*vTygktBC ze32Q_P)s?e952pYOf_e@J3P-#?_x~f@f8i)@n(nN1UocYlgn_lY%}-)c{#F-vJpw{ z1^Wl%H08N|Hz7gVBW4vS%>f8y!^kwiNI- zq5|b8z}J7r{T(Pvts9~HQtzLvO<&8r^2rb9tMAu8=(yjgt=#*t{*m;nt7`Z8Ukz&= zJ=)41t^R^ue<4@IEXW|Evae{wn_ZT*;7W{+3`$@T(P6Zo2(Q`Ay1_apuR((08kWW) zH_X>f!%H#7{S`vLc=00GvAIxumVS*_Ez#K~6i_ArzW(}ate8;stUKLwcSGjDy@Oi) zrnxIx-EJ+kJLP(D&eQ6;#eMsA|DZZFto6tB{y1{Ax)B|)dPJ`tNqL_Jm(7f(rOew} zuvHJXs$%P6UPZ$~&-1+iCA`YARQeoN&Sm9OeU7V)_vM)GXO7|!H#>^ssSr|rwA_)1 zbPYnLJU>uod^1p#RRH+`67IUy;HpgLy{@@4KRc(c-T64UQx$izUO*d>X{j`qlhhiH z4oQ)SQ5J~|#bcv`6!u3V*G8j*W=^qDjF4rT4w*h>1S65?@Nhg4O$@|`l}JPmQ5H!B zndE@HiNIEX&p7s7Q)u7s@B_P1M1c*72cZI*59BTMeWmu^LVnYiP3b#w6-1azex`d=W8=fnaCG z_}+SIx2f%|ME!CN04R+{4dfig%iu?>#QaU!o2zEA&AU4?6`8hs)mmV^ z9#~&MGAEUa=bPt*&TVa={1P(9xBgZwW6jxMHpkbcvim`m`O$eN-kN`hj&|nrSSuhY zynRt60nIa>Fe-B&w2T21GPgN0^2?~(aLbZ1I+#$10t=k9?u8ANUj<$(9%Uy0c!W@` z=I?A^*^@x?<3RJ=dM(hV2ihhNW?ADNy3t6@*QFySbg_2YsEO#2@xPxU0ENT(o^Xdt zIU)L?W8D&OJNvN~h{C71QtE{6=8#|R=WYuQNLuJ*7E@$Oup8Dqs5^r%EWUt=byhU! 
zc`haN@i0`x#cPVbIRA2zOfuJTo61S_$ri;XRQ9ohI+4UDuk-Tl7k#*NA)|>Fh7ZLO zRU(BkcWO37fIS}?*NgM5Ic8n<@+JAGtIyFdPzMii7us(NR-KCth~t(x@Iqsd&#uS7m>tvBtE|g@G)Odz%~+7yn;0InsQi~ zj41?vUS$=Hz_2i$9LBmNpHs7%V-7pBIWBKO)nGqiyyRT~MsQdf=)Zg^E<^8#%P}Rl zR!jv;UJI1*j>rpFOp)pI4aVcKNZ-KVU@@(rb&(dY90tg1UEyIhJe;{UAHCoEAoA!< z{nZnXcb!mipFX3XKBu0)qFx=;&ksF5J*1u<%2WUpZ&udi2ZG^_j*LiF1I!Sh5GI;E zhKn%*!^OBGX;x-86+_Gi4!mb6Tc+2npjYzuh1Z15vU7Od0cq z(k|84h&$`A_~7X6qv;yWze@M7QvIu*)vtn<*73>a%;>!_wXSu(>HdZX9sg$YgV8U> z)V8iiO~2Zpo_b3^)vaE5M?3Y7dN`t9>env~s8>hytFk(JUAuZ+jgG0yZ=?=q*OI^v zKvo0H1fQ;0g<&q-a|aRB!oxb!MiSbJQGLbejF_#0j=)!J%ZT$XHM|pdwtD5vk@StZ z8m+ojuWnVVTfbfia{iTQ{(_jVeQ8F=I0Wg~Il&vWQ*?~Giad_{c_7I{6ox>Z{8z6d z;WI2IdYJ-HK7jCI3Gh`4&B* z0rh;$=Q<=(ho&6Y`JcO47vRv5NtQ8?Us%+9xsyI`3ZC3zVq)^Q>;%!CvJL<`y}IJ8 zd1s`DJJpkC)ctg)JIyzsxhQyfAMjy`5H%ZaQ#&w1vNRNh>t(n!ztu|6)>?PXcuZ9_3R2W@#)qL{bi41gg365JV-LfVo|m1+8|Uj+jvWtgL>4bGy#* z*-$0S60tH>rdF-Zbm&#<=UkfDqKhr6*pd~?KM33o%t)HppoB!BAUY zLs(mJR>$y&%NGKi4+Ks1E`&IL<>WDQe$ByBm&qR@tb+9M+1F}L zhWJT}F>jar2C!w);cZKkiHHrPosK4?$R%=z5s_Vr-&jHr97pNr^wpg89o(V@x1>8~ zT(ceoO76uh=Ko#8Pf*{<9{9c*1&+r&ufN}T(sKM>nC3Ix~i=#YBevr(YoY!!BT8x3-yq%>jEVM&4wsF7Nz-O$^2=Ita5Z!3CCdXD0vAX?MxT6d>NI}8dp97nB<;SEKgU=*3UL%JadkFzIy?! 
zV(WZ`7HXe7oc-Rj0?X2MT3}7)n&#g)cld!<+tTqcs%`Gl{9TXsYrYezaKa=rmm*=; z%V4idHWDMnm66z4@X7beNn@FC48T>5N3hBqGTdm;?UD>;(Y_v(oN!IJt@0-6w&P8B zk}j(wyVT4xRXo|9^w2cZcG>h+gyER}wo*h640pf1rS@;5jbE5;yj>pSCqx@AzX*cnh76T~G*$7V1^vTAchEa4g%_z_ENa zSI<`9SWnOt;Dl_$&kN;U|vx$mA3d2X1WzL^x2&9)5zK2;7|-*Zd+ znP@jsCj#H{jlkk>O#fAqj7jhDgC)KznZ)_&MJg4u1SpGCXX`XgVeiu3l>BZQ&lVuO zOr$y+9;Ucr51530*i@mv5%&y6hc3mUd&e6`q66|0t}ip2{NM^T0c!KFUw-3JmwKjK zd-=Tn^7%@vL4@aT{vgAp*PVmJq-VWvULZ=-y3P#(ZbCXNV`65mve zkX6Z%6fDNlluPnzD#ZHXw_+Qi_;pDhjE+QNQbOuYNHK%IlJDvzquh#%dhPK`Mu4`) zv7>7CTOPtk6xw4n8CxFa)Oep|gybZ!dIOXl0@>un6!6$0N8JD(fy}oSy0$1tU&cIX| zs(&fFA`A`wY5m$vC$=o~`b`U*&%2C)sUxY5)L6E0>s(ZC+?G0C3_T02OkdMOt26J+ zcj!&K9*1_Rp29-n;P1dUAsXIg|nfW8Q0g(f~#O< zd$)htunKCx=JUn7p+{@z(HnYF$MVMhhRmyKU7O}_*Zu9Pzy0fHzR={+#fl`k*cks# zA;7pdmI^rDo(Z^lk`9Yd2Ctm3*#zKkj-n$C;R5%t`=2Ku#4`e_UvwoGc?qXqWqv^H z@=gLfzq3V+ES?##nVP|&0a=dAQY>#_9ssUUR!F^Jlo!ILu2iDy1wp?>iU(8#bExQ>9GZCQ3XxeT$SMHgAbpBGZ?vS?{;ZZARRSDqCL zhUaorxjZ;<347_6qtGc6@k=*xn8NU0mTtrb`oRRCSi1)3OpH;+UJ)u`HO-9jey9q? zJoz?S_fPRCn*r$P!SdOLwDd2B{&?t<_}#cx(V|zhJgM0IxMH_f@rqvY%H)x(uSOMW z%z-L&4S@ow>WJLopkQzbOMY>%mL94EmCs%Ffs#9{;>(H`EAg`Kgxfsf%h$qjR|<8) z9}6Z|ba+p|6H7n4)PcbCt>xM{MHqA3aK6or@jUl7$KyxmJ8-zc;AOZG9dX4FM(9w3 zdq9DEQ!;}4JN6&yj2t|Cv@>$DY?(s z8Cp*0=IJFnu6zeQo57>}a{v&3eS<30e(CYv8hh`D@BZ-PxaL`}d%!^*uJ)&aijP|} z+qJ+3J+J}3Ja7Blj;CwZ-)o*b@RQbit>DHBP9(A%2L7IKenX&`wp0lIou!f?Ho)VB zl?rW5eo@oW==?>a(6QR{i*-DpO%o{4$r{WE78DBPuiu?UlnvjNMlwt-%YQNd{uF`Y zLgl#^*5wqwh>Z@7$g>FA2}_AymzE@~yFmS8LVo}NCg2N9zx2W0+k5|{ zRr77qeUQ1lYx7gl|9&LhtBGrLam^EP<7087CT`NjO{%!*OK;%)cv{iCYjy8h!rq!m z0Ds!6RyJi4nz%(5x2WP4(41>4%Vq#H=h_OIv!)euOF|~$eG95miN^n8^|F0}^Ou6K zPxSoK#{(`+d!(?E#hpQ!#aAgc01vdf_yYea;VR!|&VV-+r~`%;sw+|)_MjlnuXFNh zE9I4*`vC=!YY@0aL#5OedCEq+u|*;rdtikXz)T_MgaD(^g+9{CTy!Pqz*g?E_ZF*! zR(tL$RTiM6ybYt#is7`E2SyhRM=oPIt(30~{s2$XU21Z`xTCG{+?5pV^~GFQNm%0u z1(i*TG51~Pgg5ERSR_KeEb8~#OJj%b&~$f_yb}B#pe2e!j1e!zwat>JG?z|xCS8kX z2BwnX{DZE2<9iR1Pc$sufSUoH(J)he!tC5?7^mfWFSEqq{+93=g@Oe{hSFlAX()c? 
zF6=M@xh;wI?|wAFX4cz}DcES+t~6q&W;+xU8d+@65aF1oTNZf<*#^gUI+8`^l&Qw$ z4(RsD@gY+cv3P|GhZBB^I+fi3*yIQ%%=^*Kk3DHuy7z7wonEjUORgv`CZjnZ|1(r%czWZ*FnJS9`D9uR%xD?o3&nO|TmpokGAfRq z=s3`MAaeLvN0+Hp$?K`K2n}{*;D$74IB_7{G|NeVHJ8a`B*{|*J|;}p*g%Yww9B;5 zn=pK=lu3N1xx(bCizS=;i?o*VCUf4~yedE4FCbp|U!%~U;!)nhf}s;4-uI5Yd*sJQ zZylXHO6K0N+sD#Pnm?@j!*D%Uz&6w>*6S7PCyzX>Z`bN~==D3G$362@s5Ki^58Y42 zfLeZB6Hn;k2~|9iHR=C!zj|5LPb;8sEtt@eC?>uXylPpOCLGd*L#l8nTU|GM83XJ%V|9Sm#2YF${Z3ah^iRL<<0x-fl#YPOj?t1{8~CUyC4+%q^xxZ+7@ z&EwFT%uX%TtcRM_Q1jOdUaHg2ZD>a~(`A~l_NmaIHg3~|HeF~_g*MiSt(wrH3oWVu zON>$2$@8Ea&DWzND&%%#i7Ch=HG2!jEFvV&n2Gi`2)xKdgSiX|ivu(6MTQecLD>p2PYM$C7R_!bhqRw57lw$f2AqAOrJ^wME}lnJ;8-sZy1Bz! zBy-a`WZaoC%-o9kcPR1@k3uf-N$yL3#q{AB39g~D>BwBa=HI3JcQOA_dDREqx4Wl$ zrhBl-OSZY-f=&oP2Tg14_&)M|7?=$pf*QI~eEwD2F#AtZ?$ouJ)v3gkKjklaL}VFX zl-B^*eA|U-ClkW-Y_ln9rzvHsjeyA#GRR$gLC}||=a?eL(rR9*w;QU$cJh3h2mT%A zY0Z831}3V|RHF(EEr04b@BSdOaX`);NNdZz+l|U4P)SwHu_AMVYAr}PR>XXd@?Qe; zzv59&1AzZ*+=O+8ktTo0OAMrhY+d72dCEQG&Ni-n(%AaAv31`4piFDruQ%>j#rmwc z;)%HCvA71uT{Ur+F78spSktWb)zS@m)_~Ron9GxHNg%+t8 z6@_qqWtmlaPfV*m>A%-pJCFLP;`ZgIx!6!|{`g-&THRZN_J z=?~6$rjAY@W!uL2Ap9L-AT|2##Nojp)%9HYBz2!4Cw;NN9zeg?4^5;>>XysQC9`2}^}S7M%~q?Aw8)ZejJUuJe)|8p0SL#uu#$dPu2kR#R@h~tg?0&NX-vXCQ`ZA=WFS_h4x3{r$tQg<&F@LW71!`*c zF@`0Rkt6?m0{?-)Ljc1yF3E9)86M_8Gf3*mza+2^pxMH*fK zYYry8JaEI@03E2#~(;0GTlz0R{0$+~{|HI_`p3?eu&q~BGrKX} znc11%t5t5&D>qHP@f6m_PErlYOZ_@;LYw`;dnuV9ERgVqIsRVRlkoP(;qCKX_z%_* zE&Q4uel7Jzn#cbW1kK;5`WwlW{W|{-BDZ|&+#~hP>*g!Y`zO+~VHPS#`oKl8{V^n!Q%au>EUzYPtvfolE+f>eW zXSqr>e`mQG)qX=3RqdDzW~(==T*b4>>e&rXD%U)&T$6cCtK6*seqsCnpmIQmFZUp}R8Kg|+LLAjehiH*|CxqAVeB}Mpw@Zj7Rx$q-MQ%YL`hSu>F zGl}$pOzqucdM%7=#OHUIh4t+RS^UusI=g$eTRneCKijKH1L~D)x+JS-W&Nb0zmZ@_ zrQj&<=2xWGrN{2Vr;gy9b>^#Q0d^YXI=nh9v(Z@1yJqcQ9}WQR`3_l5ewYo z=PUBHBlr;J9A1wU_EOJbF(X(sDSk0ka|-|A5> z#Pqk!7)!E%%H@1Tx;ou|7gL1beD!?)eKYuQ$0OIHe)Zft>hX8~&#!H7%X>aKi)TU=fBt z%ia6y^e)dx{5>P#p=8^$&vW~&#!H7%X>aKi)TU=fBt z%ia6y^e)dx{5>P#p=8^$&vW + +static PyObject* divide_points_into_groups(PyObject* self, 
PyObject* args) { + PyObject* points_list; // Input list of points + if (!PyArg_ParseTuple(args, "O", &points_list)) { + PyErr_SetString(PyExc_TypeError, "Expected a list of points."); + return NULL; + } + + // Check if the input is a list + if (!PyList_Check(points_list)) { + PyErr_SetString(PyExc_TypeError, "Input must be a list of points."); + return NULL; + } + + // Create a Python list to store groups + PyObject* groups_list = PyList_New(0); + + // Iterate through the input points and divide them into groups + // (Simplified logic - you should implement your own logic here) + // In this example, we assume points are (x, y) pairs as Python tuples + // and groups are lists of points. + Py_ssize_t num_points = PyList_Size(points_list); + PyObject* current_group = PyList_New(0); + + for (Py_ssize_t i = 0; i < num_points; i++) { + PyObject* point = PyList_GetItem(points_list, i); + PyObject* x_obj = PyTuple_GetItem(point, 0); + PyObject* y_obj = PyTuple_GetItem(point, 1); + + // Process the point (x, y) here + // (You should implement your grouping logic) + + // For simplicity, we add points to the current group + PyList_Append(current_group, point); + + // Assume a condition for creating a new group (e.g., x > 50) + if (PyLong_AsLong(x_obj) > 50) { + PyList_Append(groups_list, current_group); + current_group = PyList_New(0); + } + } + + // Append the last group if it's not empty + if (PyList_Size(current_group) > 0) { + PyList_Append(groups_list, current_group); + } + + return groups_list; +} + +static PyMethodDef methods[] = { + {"divide_points_into_groups", divide_points_into_groups, METH_VARARGS, "Divide points into groups."}, + {NULL, NULL, 0, NULL} +}; + +static struct PyModuleDef module = { + PyModuleDef_HEAD_INIT, + "point_extension", + NULL, + -1, + methods +}; + +PyMODINIT_FUNC PyInit_point_extension(void) { + return PyModule_Create(&module); +} \ No newline at end of file diff --git a/yaml parse test/dizzyFace.png b/yaml parse test/dizzyFace.png new file 
mode 100644 index 0000000000000000000000000000000000000000..a4006ee096e3ea9618f76692fc6569e047282ae5 GIT binary patch literal 1023 zcmVPx#1ZP1_K>z@;j|==^1poj6Sx`(=MF0Q*Pt_4v(h+FP5qiZDjJ^<_xelstV$!2LIsz@}LI(paEyMDRI0YdAT2iwjPkO9iFZmtg0Ej zrx*F22L7P|Y_BnKwJ3MACcUB?&7TK$?1^=M{a-=+Qr#W-2F?X#m{GbDMn?-q~ zIg+9N`;e0 zx{fv8k|Fn<34d>Oe1l(phFgh>P>+jEmWxTAibki3L$ZiKy@x#0jV#)bCG(sNd}@Aw zd}f1wV$X~)>6aGpnh$wpg?ea!g>-C*c4)tZK*fhTbz_QoWrmP&ZI*IqoONQNcU`D@ zSFe3gwSY?8kt26xhSgX{jkP#>JMCn-~4YAt`@`cOG&9Mn*jxFl0B8N2k&;v-TkE2K$ zCc4{zVv-VPX<2ziCAz9=%BoaXp;T6smz6q8D06unYH-!M*w)oI05mqSSE8vAprO8w zy{i_5%Hz=70?^tf1Kuakk%S(#@3-h$i zV|3ZxCf(WPK>r5ma}M@VxI?hJLn@}|<8~jS9(g>Ryj~wF{Y9$qS)bR-!Q(kX zJ#@P{8R0)BJvlu)zqq_2dwor+ia33Bd2xPrdP4TtPijUWaC0jyKD_VlURp)&?|k$b z1-CbWfV8FrgAb2S&o8fU?;mo<5+CnxuP@I}j}O71+)0uo$u|jwLeg18y$DZQeg>-@ zddD?8XrogMzwicMUqO?}Y15Y*qc@bHCRxVUaBp~v16{RuN&AavN~K07^F?Unu0Mn) tM8?F#M1a$@17l($i22M3`09THz$dwFZMUj)FprfUBhzAoke8+5znj;w;|IfLG+I@ccN(nU*- zc6f9b?c?8ZWxkay=Va^V>q?c3?>grCF+0@oJ~m~{<9GRdGw f64 { + let dx = self.x as f64 - other.x as f64; + let dy = self.y as f64 - other.y as f64; + (dx.powf(2.0) + dy.powf(2.0)).sqrt() + } + + fn into_tuple(&self) -> TuplePoint { + (self.x, self.y, self.color) + } +} + +#[pyfunction] +pub fn solve(points1: Vec, points2: Vec) -> (Vec, Vec) { + let mut cost_matrix = Vec::with_capacity(points1.len() * points2.len()); + + for i in &points1 { + for j in &points2 { + cost_matrix.push(i.distance(*j)) + } + } + + lsap::solve(points1.len(), points2.len(), &cost_matrix, false).unwrap() +} + +pub fn flood_fill(point: Point, mut group: Vec, points: Vec) -> Vec { + if !group.contains(&point) { + group.push(point); + + let neighbors = [(point.x + 1, point.y), (point.x - 1, point.y), (point.x, point.y + 1), (point.x, point.y - 1)]; + for neighbor_coords in neighbors { + let neighbor = points.iter().find(|p| p.x == neighbor_coords.0 && p.y == neighbor_coords.1); + + if let Some(neighbor) = neighbor { + if !group.contains(&neighbor) { + group = 
flood_fill(*neighbor, group, points.clone()) + } + } + } + } + + group +} + +#[pyfunction] +pub fn divide_into_groups(points: Vec) -> Vec> { + let mut groups = vec![]; + + let mut remaining = points.iter().filter(|p| p.x < 64).collect::>(); + while let Some(&point) = remaining.get(0) { + let group = flood_fill(*point, vec![], points.clone()); + groups.push(group.iter().map(|v| v.into_tuple()).collect()); + + remaining = remaining.into_iter().filter(|p| !group.contains(p)).collect(); + } + + groups +} + +// #[pyfunction] +// fn pair_groups(set_a: Vec>, set_b: Vec>) -> Vec<(TuplePoint, TuplePoint)> { +// let mut pairs = vec![]; + +// // Create dictionaries to store bounding boxes for each group +// let mut bounding_boxes_a: std::collections::HashMap::new(); // dict[int, tuple[float, float, float, float]] = {} +// let mut bounding_boxes_a: std::collections::HashMap::new(); + +// // Calculate bounding boxes for all groups in both sets +// for (i, group) in set_a.iter().chain(set_b).iter().enumerate() { +// let bounding_box = compute_bounding_box(group); +// if i < set_a.len() { +// bounding_boxes_a[i] = bounding_box; +// } else { +// bounding_boxes_b[i - set_a.len()] = bounding_box; +// } +// } + +// // Check for overlaps and determine pairs +// for (i, group_a) in set_a.iter().enumerate() { +// let overlap_detected = false; +// for (j, group_b) in set_b.iter().enenumerate() { +// let bounding_box_a = bounding_boxes_a[i]; +// let bounding_box_b = bounding_boxes_b[j]; +// if ( +// bounding_box_a[0] <= bounding_box_b[1] && +// bounding_box_a[1] >= bounding_box_b[0] && +// bounding_box_a[2] <= bounding_box_b[3] && +// bounding_box_a[3] >= bounding_box_b[2] +// ) { +// pairs.push((group_a, group_b)); +// overlap_detected = true; +// break +// } +// } + +// if !overlap_detected { +// // Find the nearest neighbor in set B +// let mut nearest_group = set_b[0]; +// let mut nearest_value = 0.0f64; +// for val in set_b.iter() { +// if calculate_distance(group_a, val) < 
nearest_value { +// nearest_group = val; +// } +// } +// pairs.append((group_a, nearest_group)); +// } +// } + +// pairs +// } + +#[pymodule] +fn fastproot(_py: Python<'_>, m: &PyModule) -> PyResult<()> { + m.add_function(wrap_pyfunction!(solve, m)?)?; + m.add_function(wrap_pyfunction!(divide_into_groups, m)?)?; + // m.add_function(wrap_pyfunction!(pair_groups, m)?)?; + + Ok(()) +} \ No newline at end of file diff --git a/yaml parse test/graphics/loaded.png b/yaml parse test/graphics/loaded.png new file mode 100644 index 0000000000000000000000000000000000000000..7daf0ebf540190a6b912ee1230184ffbc565ffe3 GIT binary patch literal 1394 zcmV-&1&#WNP)Ou6UsVC%iH&k1`Ng9D zd8f=gd2dS+`RfD^03nP+7`^=ZKT)k4FFv9&4Ct(P!qc9bUQwn}EJWUfY5 z=8YPy(q}w}6t+Nj5Gb{f9!AkPp(wJ0`%gYPhS+h;j$`&VW+o65Moh=b>(oC=7K>m9 znQ>tcEm!s}Q9f;Jukm4_>eLdW%7r~!dvomuvb_V4=-pd-)+qC$ zC-~L{qkQ0a_Cp%+5i*Nt>q@TzfQP`yp^OFPHvm8-m~<2N?esW^NH-%B44?tP$e|1% z5&PF&i*B=Mf@S|jY(@TM0BC#T=i1&EjMk343){L@*^Oms2khLLPs>R3(lQqBY`Tj4 z%O!S`)z@8#W)twx7DM8h-r$~yF2{!Jba|g1J#OD3(L+7wtjf8# z{zTMra5bJ3y{>%&V-m_N+7wt4g^06gIaqVDzPTXKx~Zw|^M;BuZF|;t>|G~->d`eX zXg;DLNi{qN_hqR>x-*mXc^fdNsuDc7FKcnt5C9-a_-gm^2^NWf!VHoC%OcudeHs7) zt(%C5ckSq*9nGR8N_}~tzC2*IG}Lcy4irt%99hozF zcs{L0%_WxRKv6^eX0s($v3OX04yY|{0RRRmt(}dArR9TnF{=VV&7MxP=MTG<0|1ZY z7i)hqEF*_90Khm&(}k#<&j**v8V>+3)&FRAYpX5;0F~hF?H?PKy0S)H;{YJowcSb} zdkz4fosY7jPt4i?RPphp*vHc`mQ?eTXI9B0UrJ=Jap4<80QUEZPg!3*P?@UPrC^?PTo4x^_K-R4sN3R?svn%Xf>A|43&7ciSQs6 zfO5~f$vnxZ$;VSR)_z~lc(UF%rRnB@Ey7F`k=nlbhqu za(4bYzK->ZchnB-WxiOmDfU+3ue*IE9&c0>cOD5!anoICG&M{(M|DErzlP~Nvlqt2 y%V%CG4_IEAAgO)exJQ%e|DP{6L?|>(xp#iP_>}9 1] + if duplicated_triggers: + error_message = "Duplicated trigger(s) found:\n" + for trigger in duplicated_triggers: + error_message += f"Trigger: {trigger}, Animations: {', '.join(trigger_animations[trigger])}\n" + raise DuplicateTriggerError(error_message) + + return animations + except DuplicateTriggerError as e: + print(e) # Print the error message with duplicated triggers + raise e + 
except Exception as e: + print(f"Error parsing YAML file: {e}") + return [] + + + + +# Function to simulate playing an animation +def play_animation(animation): + print(f"Playing animation: {animation['name']}") + + +# Decision-Making Layer +class DecisionLayer: + def __init__(self, animations, rendering_layer): + self.animations = animations + self.rendering_layer = rendering_layer + + def get_animation_by_trigger(self, trigger): + """ + Get an animation from the list of parsed animations based on the specified trigger. + + Args: + animations (list): List of parsed animations. + trigger (str): The trigger to match. + + Returns: + dict: The animation dictionary matching the trigger, or None if not found. + """ + for animation in self.animations: + if animation.get('trigger') == trigger: + return animation + return None # Return None if no animation with the specified trigger is found + + + def handle_trigger(self, source, payload): + trigger = None + if source == "boot": + trigger = "boot" + + elif source == "blinkTimer": + if len(self.rendering_layer.animation_queue) == 0: + trigger = "blinkTimer" + + elif source == "mqtt": + print("Received message '" + str(payload)) + if len(str(payload)) < 17: + print("received massage too short to be valid") + sensor_type = str(payload)[2:6] + sensor_specifier = str(payload)[7:11] + sensor_state = str(payload)[12:16] + + if sensor_type == "Bean": + if sensor_state == "0000": + print("Bean ID:" + sensor_specifier + " Fell.") + elif sensor_state == "0001": + print("Bean ID:" + sensor_specifier + " Rose.") + else: + print("Bean ID:" + sensor_specifier + " in illegal state.") + + elif sensor_type == "Butn": + if sensor_state == "0000": + print("Button ID:" + sensor_specifier + " Fell.") + elif sensor_state == "0001": + print("Button ID:" + sensor_specifier + " Rose.") + else: + print("Received illegal state: " + sensor_state + " for Button ID:" + sensor_specifier) + + elif sensor_type == "Move": + if sensor_specifier == "000X": + 
                print("Movement in X axis: " + sensor_state)
            elif sensor_specifier == "000Y":
                print("Movement in Y axis: " + sensor_state)
            elif sensor_specifier == "000Z":
                print("Movement in Z axis: " + sensor_state)
            else:
                print("Received illegal movement axis.")

        elif sensor_type == "Rott":
            # Rotation sensor: the specifier selects the axis.
            if sensor_specifier == "000X":
                print("Rotation in X axis: " + sensor_state)
            elif sensor_specifier == "000Y":
                print("Rotation in Y axis: " + sensor_state)
            elif sensor_specifier == "000Z":
                print("Rotation in Z axis: " + sensor_state)
            else:
                print("Received illegal Rotation axis.")

        elif sensor_type == "Gest":
            # Gesture sensor: the specifier field is always the placeholder "XXXX".
            if sensor_specifier == "XXXX":
                print("Gesture received: " + sensor_state)
            else:
                print("Received illegal gesture")

        else:
            print("received illegal sensor type: " + sensor_type)

        # str(payload) on a bytes payload yields "b'...'", so [2:16] strips the
        # leading "b'" and keeps the 14-character trigger key, e.g. "Butn_0001_0001".
        trigger = str(payload)[2:16]

        # Implement logic to decide which animation to play based on the trigger and payload
        for animation in self.animations:
            if animation.get('trigger') == trigger:
                self.rendering_layer.play_animation(animation)


class RenderingLayer:
    """Renders queued animation frames to the LED matrix at a fixed frame rate.

    Animation definitions are expanded into a flat per-frame queue (see
    generate_animation_queue); start_rendering consumes that queue forever,
    one entry per frame.
    """

    def __init__(self, animations, frame_rate=40):
        # Parsed animation definitions (list of dicts loaded from the YAML file).
        self.animations = animations
        # Point array of the most recently rendered frame; transitions
        # interpolate from it toward their target point array.
        self.current_point_array = []
        # NOTE(review): never read again — start_rendering keeps its own local
        # current_animation_action instead; confirm this attribute is needed.
        self.current_animation_action = {}
        self.frame_rate = frame_rate  # Set the desired frame rate
        self.frame_duration = 1.0 / frame_rate  # Calculate the frame duration
        self.animation_queue = []  # Initialize the animation queue

    def play_animation_by_name(self, animation_name):
        # Look the animation up by its 'name' field; every match is played
        # (each call to play_animation replaces the previous queue).
        for animation in self.animations:
            if animation.get('name') == animation_name:
                self.play_animation(animation)

    def play_animation(self, animation):
        # Interrupt whatever is currently queued and start this animation.
        if len(self.animation_queue) > 0:
            print("Stopping current animation...")
        # Replace the currently playing animation with the new one
        self.animation_queue = self.generate_animation_queue(animation)  # Add frames to the queue

    def append_animation(self, animation):
        # Queue this animation's frames behind whatever is already playing.
        self.animation_queue = self.animation_queue + self.generate_animation_queue(animation)  # Add frames to the queue

    def generate_animation_queue(self, animation):
        """Expand one animation definition into a list with one entry per output frame."""
        animation_queue = []
        graphics = animation.get('graphics', [])

        for graphic in graphics:
            if graphic.get('type') == 'image':
                point_array = graphic.get('point_array')
                duration = graphic.get('duration', 1)  # Default duration is 1 frame if not specified

                # Add frames to the queue based on the specified duration
                for _ in range(int(duration)):
                    animation_queue.append({'type': graphic.get('type'), 'point_array': point_array})

            if graphic.get('type') == 'transition':
                to_point_array = graphic.get('to_point_array')
                duration = graphic.get('duration', 1)  # Default duration is 1 frame if not specified

                # Add frames to the queue based on the specified duration.
                # stepPercentage walks 1/duration, 1/(duration-1), ..., 1/1 so the
                # final step lands exactly on the target point array.
                for i in range(int(duration)):
                    animation_queue.append({'type': graphic.get('type'), 'to_point_array': to_point_array, 'stepPercentage' : (1/(int(duration)-i))})

        return animation_queue

    def start_rendering(self):
        """Main render loop: consume one queue entry per frame, forever.

        Relies on names defined elsewhere in this module: ``matrix`` (the LED
        matrix), ``time``, ``Image`` and the point-array helpers
        (divide_points_into_groups, pair_groups, pair_points,
        interpolate_point_pairs, mirror_points,
        generate_image_from_point_array). Intended to run on its own thread.
        """
        frameCount = 0
        new_image = Image.new("RGB", (128, 32), "black")

        # Accumulated per-stage timings for transition frames (profiling output).
        devisiontime = 0
        pairgrouptime = 0
        pairpointtime = 0
        imagingtime = 0
        transitionFrameCount = 1

        while True:

            start_time = time.time()  # Get the current time before rendering

            if len(self.animation_queue) > 0:
                current_animation_action = self.animation_queue.pop(0)
                print("update action is: " + current_animation_action.get('type'))

                # Render the next frame in the queue
                if current_animation_action.get('type') == "image":
                    new_image = generate_image_from_point_array(current_animation_action.get('point_array'), 128, 32)

                    self.current_point_array = current_animation_action.get('point_array')
                    print("image generated")

                elif current_animation_action.get('type') == "transition":
                    transitionFrameCount += 1
                    divtime_start = time.time()
                    # Split the current frame and the target frame into point
                    # groups so groups (not raw points) can be matched up.
                    groupsa = divide_points_into_groups(self.current_point_array)
                    groupsb = divide_points_into_groups(current_animation_action.get('to_point_array'))
                    devisiontime += time.time() - divtime_start

                    pairgrouptime_start = time.time()
                    paired_groups = pair_groups(groupsa, groupsb)
                    pairgrouptime += time.time() - pairgrouptime_start

                    new_point_array = []
                    for pair in paired_groups:
                        pairpointtime_start = time.time()
                        point_pairs = pair_points(pair[0], pair[1])
                        pairpointtime += time.time() - pairpointtime_start
                        print(str(current_animation_action.get('stepPercentage')))
                        new_point_array += interpolate_point_pairs(point_pairs, current_animation_action.get('stepPercentage'))

                    imagingtime_start = time.time()
                    # Only one half is interpolated; the other half is mirrored.
                    new_image = generate_image_from_point_array(new_point_array + mirror_points(new_point_array), 128, 32)

                    imagingtime += time.time() - imagingtime_start

                    self.current_point_array = new_point_array

                offscreen_canvas = matrix.CreateFrameCanvas()
                offscreen_canvas.SetImage(new_image, unsafe=False)
                matrix.SwapOnVSync(offscreen_canvas)

                # Save the image to a file with the desired format and file name
                # new_image.save("output/frameNumber"+str(frameCount)+".png")
                frameCount += 1

            elapsed_time = time.time() - start_time  # Calculate time elapsed during rendering

            # Calculate the time to sleep to achieve the desired frame rate
            sleep_time = self.frame_duration - elapsed_time
            print("remaining time in frame: " + str(sleep_time))
            if sleep_time > 0:
                time.sleep(sleep_time)
            # NOTE(review): transitionFrameCount is reset to 0 below, so on a
            # later pass with no transition frames these divisions become 0/0
            # and raise ZeroDivisionError — confirm whether the reset should be
            # to 1, or the prints guarded.
            print("average time cost per part for transition frames:")
            print("devisiontime :" + str(devisiontime /transitionFrameCount ))
            print("pairgrouptime :" + str(pairgrouptime /transitionFrameCount))
            print("pairpointtime :" + str(pairpointtime /transitionFrameCount))
            print("imagingtime :" + str(imagingtime /transitionFrameCount))


            devisiontime = 0
            pairgrouptime = 0
            pairpointtime = 0
            imagingtime = 0
            transitionFrameCount = 0




# Function responsible for the blinking behaviour when Idle
def random_blinks():
    """Fire a 'blinkTimer' trigger every 5-7 seconds; runs on its own thread.

    Reads the module-level ``decision_layer`` set up by main().
    """
    while True:
        time.sleep(random.randint(5, 7))
        decision_layer.handle_trigger("blinkTimer", "")



# functions called by the MQTT listener
def on_connect(client, userdata, flags, response_code):
    """paho-mqtt connect callback: report the result code and subscribe to 'test'."""
    print("Connected to MQTT broker with result code " + str(response_code))
    client.subscribe("test")


# Function to handle MQTT message reception
def on_message(client, userdata, message):
    # Pass the received message to the decision-making layer
    decision_layer.handle_trigger("mqtt", message.payload)


def main():
    """Wire up MQTT, the render thread, the decision layer and the blink timer."""
    yaml_file = 'testAnimationYaml.yaml'  # Replace with the path to your YAML file

    # Parse the YAML file to get animations
    animations = load_animations(yaml_file)


    # MQTT broker configuration
    broker_address = "localhost"
    broker_port = 1883
    broker_keepalive = 60

    mqtt_client = mqtt.Client()
    mqtt_client.on_connect = on_connect
    mqtt_client.on_message = on_message

    mqtt_client.connect(broker_address, broker_port, broker_keepalive)

    # Run the MQTT network loop on a background thread.
    mqtt_client.loop_start()

    # Initialize the rendering layer (render loop runs on its own thread)
    rendering_layer = RenderingLayer(animations, frame_rate=10)
    rendering_thread = threading.Thread(target=rendering_layer.start_rendering)
    rendering_thread.start()

    # Initialize the decision-making layer
    # (module-global so the MQTT callbacks and the blink thread can reach it)
    global decision_layer
    decision_layer = DecisionLayer(animations, rendering_layer)

    # Create and start random blinks interrupts
    screen_update_thread = threading.Thread(target=random_blinks)
    screen_update_thread.start()

    # Kick off the boot animation once everything is wired up.
    decision_layer.handle_trigger("boot", "")



if __name__ == "__main__":
    main()
diff --git a/yaml parse test/shared.so b/yaml parse test/shared.so
new file mode 100644
index 0000000000000000000000000000000000000000..5d4bba00f92c19f814e63a396fff99c274b06cc0
GIT binary patch
literal 27357
zcmeHw4R{;XneLG^vJ~6#4|X7cp$at+SnT*8$A#9iEZZWoEi5^4!p}sOWLrd*j5IPy
z+UmIZCf4D9*7SFgQ(y7sOc-$2UCzC0~H5_xLvq@Jx>FVqcy2ew{
zSncZ7h1II+fR{19rG$O$feT~I&)Ag~#!j+=bww6y6)Ogbh*rk7mtjk*3oENk@_pcB
zH?r_8Nq7q2*MG%$y?6%7M;Z85-E3#fL+}J+FGA0X%Ry+M0#;b01B|U*Ms^T-qs!nL
zRI9{xDoEU0h##_x>PnR{EZbRNdu=qVgn^Nm*PU!7zF}!+b+ff;L98Ts9ahqHo`Hm2 z@Y**t3G6~ei^!Q;N3 z07&Iun#Y^1^$dgvJ~)v$aZs+vg(u09N8vQ$k-jFC9yyB5E5Rgp7Ds z=p5PuuVX{Ka11Z^VSvP2Pf~qpXJs858a8rZi0U|tmFjRcR>Q)1P4yhPm}2K7R?^pl z)v$10b2CS_5brcr;@!ZZ+z;_`iT~i8;MA$LGi#(kWpdwfW#b`+o&7`aZG8ETi}ycD zMa|gu%_`2tV}diaAZ>hka$&l%dj9@Zj2*F4Mxq^&xbNq`3Uuo5+MDJM?0RXe9|u9oJvqRLaJiuawEekyz3A#TcS>E0=5 zoxgQ)adGN!+4Sj~Zk3-bX8R|ppgkn1L_zyz?+cl`;cnjG`Ccvc%1Vm zk(aDIEsFjg28YFkFPp~)iYIOihC~+(Dn-=P&+6l)R*A#VHM2%@#c}0=8SBQmY~e!R zjC12pm8+(ooat~P`G3wxs~yGM-l1N~I8zn{%2DX!bt{ z>)^YE6Z8N4TX^kW?f5Z&f|7rxb>9O#UJku7!G~7o1<<7h-hQRJ8rsI{=m^%1BTMo8 zJLnkHjMEA_N>w%WoB3yf&A*Qy5tf5nrk~#&oSvKh$wIqBH0@fSelM~2e9 z4*-U`)~Pe?ihb&gl)a1}!<1@?2G-p~JnYAjiVtjR{{z2ne?lA(+DUDqx>)VM)zgUV zd%5~Dbn8}wGW6`K`KurTUNv>LUHRtJSxGs>>mn3TAFDGe+9VWd7NGw8@JCgFSN6aw z^RpPF4u#M@=*f;bQUAv(8{kSiq0}%nl$<)_`20zJ;C4-&DJuQ)JOne%B-!_N>AtT} z>XNmTI#IgsPD-t=Lh5lBK~Ig;H9%!DLE57qx0!yc+DNDu7XQln$ zqBHrK|3tr~j|QFlzesyiXUj_WeifXlv#!#;r}&Pubno9|2X2v2nR!$O&BV_#=NAwk zyfmRcQ#-RsnyHwYbK3rtwC#t1mC5fQ7AbetesZi6eh}>R@u|ZuwAkGA+iH6W!z(as zKZN0h4y!HHi~mZC$I}1tXQXgs-|<}qcRoYcOjrHN@&c)y_E%SZ2aUkyAAm`36w#@{ zBrAQib^7dHB0mrA;#Kbu)$$uoCWp^|mdLNdBGDdKy=imiw2{^G+lcTFAW&`VK2A)ICr461qp{_1 zuHd$s`WBJ*hDmuq&Fx8qGnp8!(oa@YCVS(_(L3lykP;t{xkkeAL^d63D`YO$=6G7k zh7&htW9eO75R1A-Qfb%aQP=P;C6;l86<0VKO~*1B*X4BQ0C@(JJCdn8lCFtNEE`R^ z_%(i5iKl?1Q%Wk5O1LIt=?npvM{5f|AkT1Iab@DWV;Z;H<7soP@9rDC%GFZaSX)PB zu8g5(-CJr?=}{LrH9SL2eJM{doDq@Ia+nP0Q{9X8D}ca&fUFeF-Nf zF5q;6b@KVe#UbLjOPo_yPl?MqP~vpYmC&gP;q6$fjx8?!3|r!S(dsTKzRTt=DVvhO z4wqEo_ol*)?+;|st563BLZyPXYE*R_ugfjlB`b_&bgm*{{WTlEaJQ#Q0{(Vak19O zy^RRMCW4oiLHcRki+ub(tyV_Qx$~8&`EEu&C-!MssXYj%GTOnthKPM7WjCYVF6=g8 z9Rl8kZLXbeD-`YIF5M7dJ+q@Ofbyy#Oe2V0# z>s{35Q#NX$!jj^yITZX&~ z3Y1i#%-i^{2hpV#%sN4*`bx(a7r$W6>n(BlVHJXr+vqFGlvMSghFpgi5={x*#_M?y z`TfxIm#QA#2LkXdR7k!eVaTX3K}9zeZR2Tv`}idCJ&63cYT&*M!+9&q(4On& zH^7Iqd{Es^{djQ^w{un7(KmUAB;e!aqwoNqdqY7D+r?ZOH7~8}k-i3N9oA!*OS%qQ z#xi5!bSzq%NwLiBS?m(_@$$Rsf$R7gwXpu~G-EVxIbpz#0(;SrHr7+R9b+`sCafJ; 
z2e6J{-HG*HtPf&+0_!taU%^VtDQwSTm7up6YbDl8v98BjhqVoBC)Pf!w0sn0JkL5~ ziI@`eq~l6F5>5nT5xSOOrRxiPR27D%TeZgC;Q*sE4w5~YO`xb9e5K&{Om3s2_3tls8Ng`;Y@ z(E3#=l*mBw=5Qh#WB(wjS5hcPK@AxDzJ~B?E5<@}5tm4$lChm}g{C*BIF(k|4HBHdXHBaM*{PQ6_K-cF7 zuzpDT$F$!w4_FRVCabX41uB!$o{!_;_pEu(U7a!KU05C{p0n=>tCyXqyGK+l7@>K)-|keXl(E|L>tB%zR>VD4c}_`PQ#lGw#L%N zD;irGI~s3pyuI-YjlXPstFf}_qNXdGzSMM2(~p|wo7OcqHDBM{*_>^@r}^v6bIory z+ghqyT3SBSvZ>|9mS{_|Wvb=TmKR!ndF`9mT3Q{gWvy4YwzhV)ZfYHB9dG@7>)zHc zw?5MPRO@rCueAQG^=xbT#>S1`+W6$gXEq++`0~b|ZT#nrzuI_qqqVKD?Yy>&+AeFm zx~-wDt*xW&hPHvWZEeGCx3%qULnU46V^($8$4b}gQQF_5;m$gGmZ%JnROhTKt}7$y z@AX30W>#K9+~D|l`GfO72sao+%S#sAX?A()>%~oPSIxMRO~$StjU{90cm&CCIx=>B zyt%c-RWqvn^*6X`v~^@S8&5=WpT9;-D!6J!lBt@E5{~Ssq1*B}C&l@3ac%7e@s~+Q zHel{S+%n%F@~E4J+6db)mKu+3*b$2>V;hJ%PBRER120F_W2Q)Z*_&MG;w$xM7SiP;7MtyV{73@5)XGwQVCba~Xpe>kmJ;``B5 zb~q7ZR!czym&45VX97W9WPxKks5)#R~aZzvbUfBW1PPMfG_lB?qsF$rXii zI6WF+)(Dlw6O+su^G>cE54i@B5cJo${Znby-7wIbgSg6o7e4cA4(lY!B z&)^sfXT~PO30YzGtEpm+F(o`K6Uyux9)-#R=?VOy4%+ zl6j0K<;j$+$&fS1H2^IxIv~;Yl6;BF81IdL#%dvC&g^Q%j>Er(Gnk$H z;`jl%M6xGSxY!>}GRITY7EWfn7HnM)s2SGen0<_NWX1fZO!3PcPm%&=7l|WBDI0TA zS+vW8&cr2%5zLdO(sCq~Q8H|;#qr!<WJ#!Umq3*UvhGJ#;;lsW#9DAo38d@_!%=XjCi z>+R#I$qd}^6RPTUl-i9HAJy9J33M--RL;N`FH!DJBE)3)F`i&{91vN4`8pV1;((gx4MGmsF|DoUS)e?B&H<6<6luMW18SasCFB9pLfMoQYL0k8 zXLFpU%#YX;^>QQ}8H=f{{VPHrvnLu%=&uQV!cLthrbNc%VcehD!5nW9Jd4nvCJHBG zsceRKFydh9-SO`vKWxXHsw6T|P%y_kgdMey@R&G5iRbJi$j=9 zKdz{DSS*zDlqMnnsS+EPoYo}Vk*Y}IqPHs0(T%$796rNgxL7Sr)Yr*n?^UWG#FSac zR&g1#R1(DdCl41@TF9l6kYH*32BK8ZUKY@~>_|DcA zwRRO23!aQ9kZ6DqIE^r>u+l>C8cwJ4KtydcjMfsGj zfQeD8Gu$4}VDF&c9rDWkgQ4zzUthQE9)KOIbxvTQ-{0Ro=#>Y9B!R6gH98t_ zcgn$@{(+F(HQ46~`TG0V`DW>WH#9iV7i1Uca997}fPBLs8r$#d^@Z44U4-TeZE+94 z)&Xyq7m-4q>G1Y>yL=wRG`moz`aJ+3D-r$}ZMna+a^p)9LlFDjh`& zd%L{@a%UHEczFW^*(JKXKM<1L-Gl5>9dd8(_jQur<*xn#+1=SWz&@c%gZ}<4vJdT! 
zpWem>Y6<3ALEOsL=@Ow9bh$r(_$S>S9uJr49JN`3p#k^=t?)@*gl^^b`?~wQRQJso z6*vbz?3cAD3bJaQPwoi1d%D==X3XOcb+RkWSVxFmX-0Z{++AHh7_r_g3K~RLnMGY) z172@{U9F?up^)5K*VuZk?CI~rfY28TvOm(ro4uY;|A23sR~DhiuF+-PhzN8`J}}8a zstr1epVjCL0tfpvulV~B7HCdiFJh>6B~}pSp}(rM4Q5)v?b!tXs;%Jb3-;xhb!KL0 zYryOG27_n@=nJxXvzTnDlRLJ`fua5Z)}RY190y=D23%^zAdM|>0&6@6rE8$Smy?>7 zCBdY;wmKBJ4={w?mULAeVDEga(9ARwq&yG|JF!%*+1Zt3^-F}E%V_V~I& z^{hijwt1=34ux>kmPJ{8oy?=l(WiU+;SaSF(qO|nb#a$F7*c)3ygFQqXD}iU((|f z=MQFX?k zE^>F?Fo=c}alSFN|oS3Zbtc(gQqLYJA}?4&hBu&t4q!=@9k9$y+E@i zmu;=Vpd5IiYE=%nNCoJOnwNGlsvpves4~cNnJy{sbq6=e+)SS7QUUP&PjDOguB#j; zk)&4kAee$ULabx9JJG=W5-p7;yuEnb|&6&bLO{ysdJ;%CJMEa>y&oQB)J+5m6r^P_+0>VliLQXOUY=Aq{L z26Po&L?>E)TsG-Qx0eR~as#rd1y@Qs0Y@bqgII+D+SH26BLkziv&-FEUx)sqgB_hP zwH}v5OP~f^4lRKiaT&A(YQp7?0Sa+l7cGSvaG|5Kw$TY+J+5+eh;EEtVmJ&i$Azm( z*CI&yO$Hidnd2e@!Uxwhg(;syl7~~-WR#w`a;#g$Z};x2CX32)A`MxaLrk+uWs;1EAgIGRAhWt`vaH<@mYH)s4Edi} zd4(34?eDN(B;#KFL3@B-(* zVZR)EG%q95^$sqHF@DRea6(H!UJa2)b17GDLiSFM8^3Y&7G&s=!jg@_Uru=^?mP`nOBZGgJfYb;k@D6*E= zdGV$66_GND+u*CvtSDa(l~u@a9=Fb};T5WN&`j1<8j`sAxyGQzPUf9|0o0I17p!4d zVzai1byFF3D~6>%j3tN(Fe~fDuf_6i$!h#suuqSFTZ|L%+7!D!UypG&k;RP9xu^3X zT|%UJ`wz|p(gxf+ABm4{h|_t+IZMGa=}I)7svRSG|LQ#ehI$@yPI=9HM=_is}iF*(~>CWB!bk_5~^8kV|Y9?HwmV z#8S&2a1d|$ypMOkM=g)~0q@?VNwXy*I$_$qO9 z zzN1)4gUu4fFKN^l?W!gjbV}17d(z>{#nL{n#Tl(EG#x?5WAYksFuQGc@(C3%mtB%mV6E&RMYEnHT~qW zB=YJt;0Uj_B9(9W_ko`L5cLuqsu%e#e|Z+r>2v6z9Is+^p=q?@TS$L}WXD{}BXxaw`&wozZj#3l5YeM5Qa zn{m}{H`lWghj``4dNSt?LN)-IKsAMb2%*uj3||v7du$KSMWtVfnD`_SbBpG&CLqFN zuAI!c6NBc{aJE_V!@MlgT!dCxXUvf6J0YM#Gt5}BXd}rOa43Kt>Luny8xy_;Sd0n! 
zK}}<7u`vq=!cgeZB>Gm1Rcfmb>hIgZ5jN}jjeC1k{WPlBEcDgA{JPz(a#nK=|Kgre z1>gS*m4mZF=%LX#-|+vYLM5ETzsSe)8h>c~IR&2Z;W8sF|IT4`^LyY?kFl{@fwc6& z6(*$lOdh8*kMmX@r!9|jI*(JA$Ds#tOr^K1&*RMJaW2i{yq?FQFHV{?H=oSo6ffnp z(0BRFxn9c4brya$b6(8jP+OQdO%qEw&3U<+pIgdld1a}d=A(I>XYx3Q^EglBac1*4 z59V>6$m86b$9XJ|vonuF(P{EV%SayQ;XKYj9_PV4PDdW+fjmxA9_Jf*oT@y|!8{Hf zjZC#@x^5|_*^o;;hkOW}ymQD{GOC@^d>dM~WAMNq@4bv229g`2fHL+1kO>@kY3yBg zWK+M|Dmc&s{?g??a`dAyI%l(mykIUc@4RlABP`kXi(I|PW;*tn^$_PRKzn)Ji9=q> z=TLmnkiOFBM`8=)YgHd&7YmILv{EjCG>Q#RiQNuthD^C`1ah}(53WkY>xb+lKkm3x zJ=?+2`l--E4;O3EE0C{XqxE-zd{b=rd4kyeHa3T09Yvi$p26lQRvmd2DXkB}9_{5x zlg$=Xam<8V1jJnTS|BHLWy!L$d`UQ|O6Y>s_Yfw5J&4tZnFyHCk` zC@PS*fV7)zEo363Mp{wRZvruU>@W~oXv7elV?c`46!ij% z-Y`*AZwMsHQ+oXT6LRh07Lp#^Va9`6y`h_tiwdCLEnYL8H(9sFUePr%h!wskwANcLRA5di1u~2IPzh8KYcqjGpVWK%Ovh zz6Rti6Y@7ejsihosa|~?2&HsA$ABCJqL=S2sV63k~TGm?}bC|r;TA7Y1H45S)c zJ$@R2w3=!f0CEZ(-JY9)h=t0+KoYVn#*C`=^XM2q-tYh;yZP$HX0kw zj^aI~aB>t6m#9hp^*T*7Je-bAYLY}8FY(Boi9|2%$6!bbDq(zzZlWF2wD}*q8%fe= zcY`N4GlmrIQs<`7RU1@v>9_KXR`M_PSMT1vJ6AR@5l4-bJc_A#y!aEP`^r?F4)I&f z9NLwXaU%my%ET2lqmsacCgmZyd94F0L_OnvU@|FN_49Qx5rq zG)xZT-96-*h|!I3S^at+s*EutOvQbmin_*8W(ekg>pE&JsXt`F6l$nd68i`Mjs(NmnH&{TIgf>*!b^mru0 zn}fdvkQz}Z)G?bq9_OCo@2AK)0iNHArSaMhwIF{jPtIhA@sx=Y8^;S4{NszFb#W`7 z&Jy(M%N&U>gNS3Aywt76PegKuiXf#PkWeyFyjv>ci;$M}nlq{Z|Z7SX&*0!)0{ zat!Yc%Hn1htGy!@-cgG{ScXZkw?_@%4sW-wk5>`@E5lfsmBUYqc+JIQvG4?5xX5{! zhmU$?p|(D1fe*T9AC75nWmK;v2&VbT$MkBG8r3;O^(YMpcq=TGF*OZ}=hfEYjz>rs zFK5v^sUjA{r*OopG2<~jUV!HI8@@uroyyH#+L3&CAWSQLWsgww&}haj$3E|zS~lZ@ z2(nCG)+D0Bkz85~h|$Aq)EtYy5UqUR9Q1{`H0p(>0=(ea)G?`V1(giB(;1fFZaPCUL8}! 
z8oR#mwqh_^_KhQkI_R}#*)S%^xt^8(jo9Ul5gt}A4b%ovPu_9N$p6qR+^jl`qSQ1- zWU?u3h&bnPsuoF~I)b)k9S4DExj0qR)Su`THu`EV-r*KQsn)@E#L~%F0&il3k=q!4 zqH#s%Tz>czvi!1mpUZu1$z0ngP7%}rF{FPXN)O8w4;tr==DdEta<|q7I*h4$-E)UE lnG6(N7KawYi3VYKaFzmIiz~Iq>opZkv|^xgzo-}2{}-c8zcl~= literal 0 HcmV?d00001 diff --git a/yaml parse test/test.py b/yaml parse test/test.py new file mode 100644 index 0000000..c273404 --- /dev/null +++ b/yaml parse test/test.py @@ -0,0 +1,9 @@ +from timeit import default_timer as timer +from ctypes import * + + +proot = CDLL("./fastproot/target/debug/fastproot.dll") +start = timer() +proot.test() +end = timer() +print(end - start) \ No newline at end of file diff --git a/yaml parse test/testAnimationYaml.yaml b/yaml parse test/testAnimationYaml.yaml new file mode 100644 index 0000000..b639bdc --- /dev/null +++ b/yaml parse test/testAnimationYaml.yaml @@ -0,0 +1,83 @@ +animations: + - name: example animation + description: This is the example animation to showcase some of the options + loop_count: 1 # Number of loops (1 for single play, 0 for infinite) + + # Define the trigger for this animation. + # In this case, it triggers when a button is pressed. + trigger: Butn_0001_0001 #Button 1 pressed + + # Specify whether this animation can be overridden by another animation. + overrideable: true + + graphics: + - type: transition + to_file: animation1_frame1.png # Specify the new PNG image for the transition + duration: 10 # The amount of frames the transition will take. + + - type: image + source_file: animation1_frame1.png + duration: 10 # The amount of frames the image will be shown. + # This is the initial frame of the animation. + + - type: transition + to_file: animation1_frame2.png + duration: 10 # The amount of frames the transition will take. + # This is a transition from the initial frame to the next frame. + # Transitions can be used to create smooth animations. + + # You can add more graphics elements as needed for this animation. 
+
+    # Additional comments or configuration options for Animation1 can go here.
+    # For example, you can specify the duration, sound effects, or other details.
+
+  - name: blink
+    description: Animation for blinking
+    loop_count: 1
+
+    trigger: blinkTimer # fired periodically by the random blink timer
+
+    overrideable: true # blink can be interrupted at any time
+
+    graphics:
+      - type: transition # close the eye from whatever the current state
+        to_file: dizzyFace.png
+        duration: 5
+
+      - type: image # hold eye closed
+        source_file: dizzyFace.png
+        duration: 10
+
+      - type: transition # open the eye again from being closed
+        to_file: neutral.png
+        duration: 5
+
+  - name: openEye
+    description: Animation for blinking
+    loop_count: 1
+
+    trigger: boot
+
+    overrideable: true # blink can be interrupted at any time
+
+    graphics:
+      - type: image # hold eye closed
+        source_file: eyesClosed_neutral.png
+        duration: 1
+
+      - type: transition # open the eye again from being closed
+        to_file: dizzyFace.png
+        duration: 5
+
+  - name: make dizzy
+    description: Animation for making dizzy
+    loop_count: 1
+
+    trigger: Butn_0002_0001
+
+    overrideable: true # blink can be interrupted at any time
+
+    graphics:
+      - type: transition # transition to the dizzy face
+        to_file: dizzyFace.png
+        duration: 5
\ No newline at end of file