diff --git a/yaml parse test/Point2D.py b/yaml parse test/Point2D.py
index 980eb95..2b0400d 100644
--- a/yaml parse test/Point2D.py
+++ b/yaml parse test/Point2D.py
@@ -1,4 +1,5 @@
 import hashlib
+from itertools import permutations
 import json
 import os.path
 import math
@@ -46,6 +47,7 @@ class Point2D:
 
 def divide_points_into_groups(points):
+    print("divide_points_into_groups: ", len(points))
     result = fastproot.divide_into_groups(points)
     res = [[Point2D(p[0], p[1], p[2]) for p in arr] for arr in result]
     return res
 
@@ -90,7 +92,27 @@ def calculate_distance(points1: list[Point2D], points2: list[Point2D]) -> float:
     center2_y = (points2[0].y + points2[-1].y) / 2
     return ((center1_x - center2_x) ** 2 + (center1_y - center2_y) ** 2) ** 0.5
 
+
 def pair_groups(set_a: list[list[Point2D]], set_b: list[list[Point2D]]) -> list[tuple[list[Point2D], list[Point2D]]]:
+    # print("set_a", [[(b.x, b.y) for b in i] for i in set_a])
+    # print("set_b", [[(b.x, b.y) for b in i] for i in set_b])
+    result = []
+    if len(set_a) <= len(set_b):
+        result = fastproot.pair_groups(set_a, set_b)
+    else:
+        for (b, a) in fastproot.pair_groups(set_b, set_a):
+            result.append((a,b))
+    res = [
+        (
+            [Point2D(a1[0], a1[1], a1[2]) for a1 in a],
+            [Point2D(b1[0], b1[1], b1[2]) for b1 in b]
+        ) for (a, b) in result
+    ]
+    #res = [[Point2D(p[0], p[1], p[2]) for p in arr] for arr in result]
+    return res
+
+
+def _pair_groups(set_a: list[list[Point2D]], set_b: list[list[Point2D]]) -> list[tuple[list[Point2D], list[Point2D]]]:
     pairs = []
 
     # Create dictionaries to store bounding boxes for each group
@@ -131,7 +153,7 @@ def pair_groups(set_a: list[list[Point2D]], set_b: list[list[Point2D]]) -> list[
 def mirror_points(points: list[Point2D]) -> list[Point2D]:
     mirrored_points = []
     for point in points:
-        mirrored_x = 128 - point.x # Calculate the mirrored x-coordinate
+        mirrored_x = 127 - point.x # Calculate the mirrored x-coordinate
         mirrored_point = Point2D(mirrored_x, point.y, point.color)
         mirrored_points.append(mirrored_point)
     return mirrored_points
@@ -174,7 +196,7 @@ def generate_point_array_from_image(image):
     width, height = image.size
     pixel_array = []
 
-    image.save(LOADED_IMAGE_PATH)
+    #image.save(LOADED_IMAGE_PATH)
 
     for y in range(height):
         for x in range(width):
@@ -204,13 +226,18 @@ def generate_image_from_point_array(points: list[Point2D], width: int, height: i
 
     return image
 
-
 def interpolate_point_pairs(pairs: list[tuple[Point2D, Point2D]], percentage: float) -> list[Point2D]:
+    result = fastproot.interpolate_point_pairs(pairs, percentage)
+    res = [Point2D(p[0], p[1], p[2]) for p in result]
+    return res
+
+def _interpolate_point_pairs(pairs: list[tuple[Point2D, Point2D]], percentage: float) -> list[Point2D]:
     interpolated_points:list[Point2D] = []
 
     for pair in pairs:
         point1, point2 = pair
         interpolated_point = point1.interpolate(point2, percentage)
-        interpolated_points.append(interpolated_point)
+        if not interpolated_point in interpolated_points:
+            interpolated_points.append(interpolated_point)
 
     return interpolated_points
@@ -229,7 +256,7 @@ def pair_points(points1: list[Point2D], points2: list[Point2D]) -> list[tuple[Po
         duplicated_points = np.random.choice(points1, size=num_duplicates).tolist()
         points1 += duplicated_points
 
-    row_ind, col_ind = fastproot.solve(points1, points2)
+    row_ind, col_ind = (fastproot.solve(points1, points2))
 
     # Create pairs of points based on the optimal assignment
     pairs = []
diff --git a/yaml parse test/fastproot.so b/yaml parse test/fastproot.so
new file mode 100644
index 0000000..1a05ea8
Binary files /dev/null and b/yaml parse test/fastproot.so differ
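Note on the fastproot boundary in the Point2D.py hunks above: Point2D objects go into the compiled extension, and the wrappers rebuild Point2D instances from the indexable (x, y, color) triples that come back. The snippet below is only a rough pure-Python stand-in for fastproot.pair_groups under that assumed contract (each group from the smaller first list matched to the nearest group in the second list by centre distance); it illustrates the expected shapes, not the extension's actual algorithm, and py_pair_groups is a made-up name.

# Rough pure-Python stand-in for fastproot.pair_groups (assumption: nearest
# group by centre distance, plain (x, y, color) triples returned). Reference
# only, for when fastproot.so is unavailable.
def py_pair_groups(set_a, set_b):
    def center(group):
        xs = [p.x for p in group]
        ys = [p.y for p in group]
        return sum(xs) / len(xs), sum(ys) / len(ys)

    pairs = []
    for group_a in set_a:
        ax, ay = center(group_a)
        # nearest group in set_b by squared centre distance
        group_b = min(set_b, key=lambda g: (center(g)[0] - ax) ** 2 + (center(g)[1] - ay) ** 2)
        pairs.append((
            [(p.x, p.y, p.color) for p in group_a],
            [(p.x, p.y, p.color) for p in group_b],
        ))
    return pairs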
diff --git a/yaml parse test/prootOS.py b/yaml parse test/prootOS.py
index 2d55faa..15d3a34 100644
--- a/yaml parse test/prootOS.py
+++ b/yaml parse test/prootOS.py
@@ -179,6 +179,9 @@ class RenderingLayer:
         self.frame_rate = frame_rate # Set the desired frame rate
         self.frame_duration = 1.0 / frame_rate # Calculate the frame duration
         self.animation_queue = [] # Initialize the animation queue
+
+        self.previous_to = []
+        self.previous_point_pairs_groups = []
 
     def play_animation_by_name(self, animation_name):
         for animation in self.animations:
@@ -209,15 +212,16 @@ class RenderingLayer:
                     animation_queue.append({'type': graphic.get('type'), 'point_array': point_array})
 
                 if graphic.get('type') == 'transition':
+                    to_file_name = graphic.get('to_file')
                     to_point_array = graphic.get('to_point_array')
                     duration = graphic.get('duration', 1) # Default duration is 1 frame if not specified
 
                     # Add frames to the queue based on the specified duration
                     for i in range(int(duration)):
-                        animation_queue.append({'type': graphic.get('type'), 'to_point_array': to_point_array, 'stepPercentage' : (1/(int(duration)-i))})
+                        animation_queue.append({'type': graphic.get('type'), 'to_point_array': to_point_array, 'to_file_name': to_file_name, 'additionalStepPercentage' : (1/(int(duration)-i)), 'stepPercentage' : ((i+1)/duration)})
 
         return animation_queue
-    
+
     def start_rendering(self):
        frameCount = 0
        new_image = Image.new("RGB", (128, 32), "black")
@@ -226,8 +230,10 @@ class RenderingLayer:
         pairgrouptime = 0
         pairpointtime = 0
         imagingtime = 0
+        interpolatetime = 0
 
         transitionFrameCount = 1
+
 
         while True:
             start_time = time.time() # Get the current time before rendering
@@ -245,38 +251,87 @@ class RenderingLayer:
                 print("image generated")
 
             elif current_animation_action.get('type') == "transition":
-                transitionFrameCount += 1
-                divtime_start = time.time()
-                groupsa = divide_points_into_groups(self.current_point_array)
-                groupsb = divide_points_into_groups(current_animation_action.get('to_point_array'))
-                devisiontime += time.time() - divtime_start
-                pairgrouptime_start = time.time()
-                paired_groups = pair_groups(groupsa, groupsb)
-                pairgrouptime += time.time() - pairgrouptime_start
-                new_point_array = []
-                for pair in paired_groups:
-                    pairpointtime_start = time.time()
-                    point_pairs = pair_points(pair[0], pair[1])
-                    pairpointtime += time.time() - pairpointtime_start
-                    print(str(current_animation_action.get('stepPercentage')))
-                    new_point_array += interpolate_point_pairs(point_pairs, current_animation_action.get('stepPercentage'))
+                frame_time_start = time.time()
+
+
+                if self.previous_to == str(current_animation_action.get('to_file_name')):
+
+
+                    point_pairs_groups = self.previous_point_pairs_groups
+
+                    new_point_array = []
+                    for point_pairs in point_pairs_groups:
+                        interpolatetime_start = time.time()
+                        new_point_array += interpolate_point_pairs(point_pairs, current_animation_action.get('stepPercentage'))
+                        print("interpolating took: " + str(time.time() - interpolatetime_start) + " sec. (cached)")
+                        print("step percentage for this transition step is: " + str(current_animation_action.get('stepPercentage')))
+
+                else:
+                    print("starting transition generation")
+                    transitionFrameCount += 1
+                    divtime_start = time.time()
+
+                    groupsa = divide_points_into_groups(self.current_point_array)
+                    groupsb = divide_points_into_groups(current_animation_action.get('to_point_array'))
+                    devisiontime += time.time() - divtime_start
+
+                    print("groups divided, len(groupsa), len(groupsb) =", len(groupsa), len(groupsb))
+
+                    pairgrouptime_start = time.time()
+                    paired_groups = pair_groups(groupsa, groupsb)
+                    pairgrouptime += time.time() - pairgrouptime_start
+
+
+                    print("paired_groups generated, len(paired_groups) =", len(paired_groups))
+
+                    self.previous_point_pairs_groups = []
+                    new_point_array = []
+                    for pair in paired_groups:
+                        pairpointtime_start = time.time()
+                        point_pairs = pair_points(pair[0], pair[1])
+                        self.previous_point_pairs_groups.append(point_pairs)
+                        pairpointtime += time.time() - pairpointtime_start
+                        print("step percentage for this transition step is: " + str(current_animation_action.get('additionalStepPercentage')))
+
+                        interpolatetime_start = time.time()
+                        new_point_array += interpolate_point_pairs(point_pairs, current_animation_action.get('additionalStepPercentage'))
+                        print("interpolating took: " + str(time.time() - interpolatetime_start) + " sec.")
+
+                    print("face features interpolated, len(new_point_array) =", len(new_point_array))
+
+                    # set previous "to" screen. This is used to detect repeated frames of the same transition so the pairing work can be reused
+                    self.previous_to = str(current_animation_action.get('to_file_name'))
+
                 imagingtime_start = time.time()
+
                 new_image = generate_image_from_point_array(new_point_array + mirror_points(new_point_array), 128, 32)
+                #new_image.save("output/frameNumber"+str(frameCount)+".png")
                 imagingtime += time.time() - imagingtime_start
                 self.current_point_array = new_point_array
+                print("transition generated, len(new_point_array) =", len(new_point_array))
+
+                print("creating transition frame took: " + str(time.time() - frame_time_start) + " sec.")
+                print("================== end frame ==================")
+            else:
+                print("unknown action: ", current_animation_action)
+
+            print("setting image to canvas")
             offscreen_canvas = matrix.CreateFrameCanvas()
             offscreen_canvas.SetImage(new_image, unsafe=False)
+            print("pushing image to matrix")
             matrix.SwapOnVSync(offscreen_canvas)
+            print("pushing image done")
 
             # Save the image to a file with the desired format and file name
             # new_image.save("output/frameNumber"+str(frameCount)+".png")
-            frameCount += 1
+            frameCount += 1
+            # new_image.save("output/frameNumber"+str(frameCount)+".png")
 
             elapsed_time = time.time() - start_time # Calculate time elapsed during rendering
print("imagingtime :" + str(imagingtime /transitionFrameCount)) devisiontime = 0 @@ -345,7 +401,7 @@ def main(): mqtt_client.loop_start() # Initialize the rendering layer - rendering_layer = RenderingLayer(animations, frame_rate=10) + rendering_layer = RenderingLayer(animations, frame_rate=40) rendering_thread = threading.Thread(target=rendering_layer.start_rendering) rendering_thread.start()