diff --git a/.vscode/settings.json b/.vscode/settings.json
index 46cd5b5c2e7c46463331760e9df7a00b87ab3d12..7b016a89fbafd4b802a61d3207cf76f7c2253c6e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,7 +1,3 @@
 {
-<<<<<<< HEAD
-    "java.configuration.updateBuildConfiguration": "interactive"
-=======
     "java.compile.nullAnalysis.mode": "automatic"
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
 }
\ No newline at end of file
diff --git a/README.md b/README.md
index 096c7519a73d52211ccb27cd5097cfb45ca8e189..9abd6964ac5380a827bfe7fd37ce68c39fc0d470 100644
--- a/README.md
+++ b/README.md
@@ -71,7 +71,6 @@ graph LR;
 
 ### Features
 
-<<<<<<< HEAD
 - [10%] Object Recognition: using YOLOv4, we took the pretrained weights from the model and built a YOLONet, a convolutional neural network that detects objects within a 2D image.
 - [30%] Performance Optimization: we developed the ObjectSet data structure, which iterates over the frames of a video and incorporates more information at each frame. The ObjectSet holds a list of PointSets, and each PointSet represents a collection of 3D points that constitute an object in the real world. At each iteration, we find candidate objects in the new frame and check whether each candidate is an instance of a previously found PointSet or a previously undiscovered object in the real world (see the sketch after this file's diff).
     > For this feature, we have not fully optimized it, and we plan on improving it before the final report
@@ -134,4 +133,3 @@ The following links:
 - The database used was [MongoDB](https://www.mongodb.com/), and the database itself was hosted on a MongoDB-provided free-tier server
 - The testing was done using [JUnit](https://junit.org/junit5/)
 - The MathWorks Monocular VSLAM tutorial was used to guide our vslam_implementation MATLAB script: [VSLAM](https://www.mathworks.com/help/vision/ug/monocular-visual-simultaneous-localization-and-mapping.html)
-
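Editor's note: a minimal sketch of the ObjectSet/PointSet relationship the README describes. Type names mirror the repository's classes, but the bodies and the overlap threshold are illustrative only; the real code compares points with an epsilon rather than identity-based `contains`.

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative stand-ins for the repository's Point/PointSet/ObjectSet types.
class Point3 {
    final float x, y, z;
    Point3(float x, float y, float z) { this.x = x; this.y = y; this.z = z; }
}

class PointSetSketch {
    final List<Point3> pset = new ArrayList<>(); // 3D points forming one real-world object
}

class ObjectSetSketch {
    final List<PointSetSketch> objects = new ArrayList<>();

    // Per frame: merge a candidate into an existing object when enough of its
    // points overlap, otherwise record it as a newly discovered object.
    void reconcile(PointSetSketch candidate, double threshold) {
        for (PointSetSketch obj : objects) {
            long shared = candidate.pset.stream().filter(obj.pset::contains).count();
            if (shared > threshold * candidate.pset.size()) {
                obj.pset.retainAll(candidate.pset); // combine via intersection
                return;
            }
        }
        objects.add(candidate); // previously undiscovered object
    }
}
```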
diff --git a/pom.xml b/pom.xml
index 69f291efa263a53a52fe659b2001662bfe6c0164..bb3cee66586d3d510052f03d24c67f770f7fd713 100644
--- a/pom.xml
+++ b/pom.xml
@@ -8,11 +8,12 @@
     <artifactId>vslam-objects</artifactId>
     <version>1.0</version>
 
-     <properties>
+    <properties>
         <maven.compiler.source>17</maven.compiler.source>
         <maven.compiler.target>17</maven.compiler.target>
         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
         <exec.mainClass>top.BackendJava</exec.mainClass>
+        <!-- <exec.mainClass>yolo.YOLODetector</exec.mainClass> -->
     </properties>
 
     <build>
@@ -125,4 +126,4 @@
         </dependency>
 
     </dependencies>
-</project>
\ No newline at end of file
+</project>
diff --git a/run_vslam_mono.sh b/run_vslam_mono.sh
index 13ae3dde4382eacd0683b08318dc1a96b75266c8..456f0c2f322133f05d820990f8eb601f28b619d7 100644
--- a/run_vslam_mono.sh
+++ b/run_vslam_mono.sh
@@ -1,37 +1,10 @@
 #!/bin/bash
 
 # Define the script that runs the VSLAM implementation
-<<<<<<< HEAD
-<<<<<<< HEAD
-MATLAB_SCRIPT="/src/main/java/vslam/vslam_implementation_rgbd.m"
-
-# Check if the correct number of arguments was provided
-if [ "$#" -ne 1 ]; then
-    echo "Usage: $0 {tum_rgbd_dataset|imperial_college_london}"
-    exit 1
-fi
-
-# Determine which dataset to use based on the argument provided
-if [ "$1" = "tum_rgbd_dataset" ]; then
-    DATASET_NAME="tum_rgbd_dataset"
-elif [ "$1" = "imperial_college_london" ]; then
-    DATASET_NAME="imperial_college_london"
-else
-    echo "Invalid dataset name. Choose either 'tum_rgbd_dataset' or 'imperial_college_london'"
-    exit 2
-fi
-=======
-MATLAB_SCRIPT="/src/main/java/vslam/vslam_implementation.m"
-
-# Define the dataset name
-DATASET_NAME="self_made_dataset"
->>>>>>> divider
-=======
 MATLAB_SCRIPT="/src/main/java/vslam/vslam_implementation.m"
 
 # Define the dataset name
 DATASET_NAME="self_made_dataset"
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
 
 # Navigate to the MATLAB script directory (assuming MATLAB can be called from command line)
 cd src/main/java/vslam
diff --git a/src/main/java/database/MongoDBInteraction.java b/src/main/java/database/MongoDBInteraction.java
index 76c683bebe649d014855f89bcd8229809f594e00..626c391f618d51c3c91d117ef561db41c7808b08 100644
--- a/src/main/java/database/MongoDBInteraction.java
+++ b/src/main/java/database/MongoDBInteraction.java
@@ -34,6 +34,7 @@ public class MongoDBInteraction {
         this.mongoClient = MongoClients.create(settings);
         this.database = mongoClient.getDatabase("Objects");
         this.objectCollection = database.getCollection("objectSets");
+        System.out.println("MongoDB Connection Established");
     }
 
 
@@ -45,8 +46,10 @@ public class MongoDBInteraction {
         try {
             Document doc = objectCollection.find().sort(new Document("index", -1)).first();
             if (doc == null) {
+                System.out.println("No document found.");
                 return null;
             } else {
+                System.out.println("Document found: " + doc.toJson());
                 return convertDocumentToObjectSet(doc);
             }
         } catch (Exception e) {
@@ -57,11 +60,13 @@ public class MongoDBInteraction {
 
     private ObjectSet convertDocumentToObjectSet(Document doc) {
         if (doc == null) {
+            System.out.println("Document is null, no conversion possible.");
             return null;
         }
 
         List<Document> pointSetDocs = doc.getList("objectSets", Document.class);
         if (pointSetDocs == null || pointSetDocs.isEmpty()) {
+            System.out.println("No point sets found in document.");
             return new ObjectSet();
         }
 
@@ -71,7 +76,7 @@ public class MongoDBInteraction {
             if (pointSet != null) {
                 objectSet.objects.add(pointSet);
             } else {
-                System.out.println("Failed to convert point set document");
+                System.out.println("Failed to convert point set document: " + pointSetDoc.toJson());
             }
         }
         return objectSet;
@@ -79,6 +84,7 @@ public class MongoDBInteraction {
 
     private PointSet convertDocumentToPointSet(Document doc) {
         if (doc == null) {
+            System.out.println("PointSet document is null.");
             return null;
         }
 
@@ -93,6 +99,7 @@ public class MongoDBInteraction {
 
         List<Document> pointsDocs = doc.getList("points", Document.class);
         if (pointsDocs == null) {
+            System.out.println("No points found in point set document.");
             return new PointSet(idx);
         }
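Editor's note: the retrieval path above fetches the "latest" set by sorting on `index` descending and taking the first document. A minimal standalone sketch of that query with the plain MongoDB Java driver the class already uses (the connection string here is an assumption; the real settings are built in the constructor, not shown in this hunk):

```java
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

public class LatestObjectSetQuery {
    public static void main(String[] args) {
        // Connection string is an assumption for local testing.
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> col =
                    client.getDatabase("Objects").getCollection("objectSets");
            // Sort by "index" descending and take the first document, i.e. the newest set.
            Document latest = col.find().sort(new Document("index", -1)).first();
            System.out.println(latest == null ? "No document found." : latest.toJson());
        }
    }
}
```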
 
diff --git a/src/main/java/object_detection/.DS_Store b/src/main/java/object_detection/.DS_Store
index aff5713ce0a01c7592b2fac9d9fad1c285ba7641..8303d96719256ee4a20806fcab63eb45bec574c3 100644
Binary files a/src/main/java/object_detection/.DS_Store and b/src/main/java/object_detection/.DS_Store differ
diff --git a/src/main/java/object_detection/ObjectDetector.java b/src/main/java/object_detection/ObjectDetector.java
index 5fa2a8302072ec04c5c93d5b43fe29f6dfc858a9..82decce9ecdc42824cbfcb9b548a2f387f725d2c 100644
--- a/src/main/java/object_detection/ObjectDetector.java
+++ b/src/main/java/object_detection/ObjectDetector.java
@@ -3,7 +3,6 @@ package object_detection;
 import com.opencsv.exceptions.CsvValidationException;
 import database.MongoDBInteraction;
 import object_detection.types.*;
-import org.bytedeco.ffmpeg.avutil.Cmp_Const_Pointer_Const_Pointer;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -22,7 +21,6 @@ public class ObjectDetector {
     public static void startProcess(String dataset) throws IOException, CsvValidationException {
 
         // for now, we can just set paths to the directories that hold keyframes and featurepoint CSVs
-<<<<<<< HEAD
         String bbox_dir_pth = "src/main/java/vslam/BoundedInfo";
         String pose_dir_path = "src/main/java/vslam/CameraPoses";
 
@@ -32,24 +30,6 @@ public class ObjectDetector {
         } else if (dataset.equals("2")) {
             bbox_dir_pth = "src/main/java/vslam/imperial_london/BoundedInfoTUM";
             pose_dir_path = "src/main/java/vslam/imperial_london/CameraPosesTUM";
-=======
-        String bbox_dir_pth;
-        String pose_dir_path;
-        CameraIntrinsics intrinsics;
-        List<Point> pointCloud;
-
-        if(dataset.equals("1")){
-            bbox_dir_pth = "src/main/java/vslam/tum_rgbd/BoundedInfo";
-            pose_dir_path = "src/main/java/vslam/tum_rgbd/CameraPoses";
-            intrinsics = new CameraIntrinsics("src/main/java/vslam/tum_rgbd/CameraIntrinsics.csv");
-            pointCloud = Downsampler.get_voxels("src/main/java/vslam/tum_rgbd/pointcloud.csv", 0.05F);
-        }
-        else{
-            bbox_dir_pth = "src/main/java/vslam/imperial_london/BoundedInfo";
-            pose_dir_path = "src/main/java/vslam/imperial_london/CameraPoses";
-            intrinsics = new CameraIntrinsics("src/main/java/vslam/imperial_london/CameraIntrinsics.csv");
-            pointCloud = Downsampler.get_voxels("src/main/java/vslam/imperial_london/pointcloud.csv", 0.05F);
->>>>>>> d54dedd1dda103cf3c9918bc5acfe073621f5ebb
         }
 
         // get files
@@ -63,7 +43,6 @@ public class ObjectDetector {
         /* #################################################
         In the section below, we create a new ObjectSet, and iterate over each Keyframe
          ################################################## */
-<<<<<<< HEAD
         CameraIntrinsics intrinsics = new CameraIntrinsics("src/main/java/vslam/CameraIntrinsics.csv");
         List<Point> pointCloud = Downsampler.get_voxels("src/main/java/vslam/pointcloud.csv", 0.05F);
 
@@ -75,8 +54,6 @@ public class ObjectDetector {
             pointCloud = Downsampler.get_voxels("src/main/java/vslam/imperial_london/pointcloudTUM.csv", 0.05F);
         }
 
-=======
->>>>>>> d54dedd1dda103cf3c9918bc5acfe073621f5ebb
         ObjectSet os = new ObjectSet(intrinsics, pointCloud);
 
         // iterate through each frame, create the frame, then process it
@@ -85,7 +62,18 @@ public class ObjectDetector {
             Frame f = new Frame(bbox_CSVs[i].getPath(), cp);
             os.processFrame(f);
             System.out.println("Processed frame " + i);
+            if(i == 5){
+                break;
+            }
+        }
+
+        Point[] pp = new Point[pointCloud.size()];
+        int i = 0;
+        for(Point p : pointCloud){
+            pp[i] = p;
+            i++;
         }
+        os.objects.add(new PointSet(0, pp));
 
         // update MongoDB
         MongoDBInteraction mdbi = new MongoDBInteraction();
@@ -114,11 +102,8 @@ public class ObjectDetector {
         return f_arr;
     }
 
-<<<<<<< HEAD
     public static void main(String[] args) throws IOException, CsvValidationException {
         startProcess("1");
     }
 
-=======
->>>>>>> d54dedd1dda103cf3c9918bc5acfe073621f5ebb
 }
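Editor's note: the new block above copies the point cloud into an array by hand before appending it as PointSet 0, and the `if(i == 5) break;` caps processing at the first six frames, which reads like a debugging limit. The manual copy can be expressed with `List.toArray`; a drop-in sketch, assuming `pointCloud` is a `List<Point>` as in the surrounding code:

```java
// Equivalent to the manual copy loop added above:
Point[] pp = pointCloud.toArray(new Point[0]);
os.objects.add(new PointSet(0, pp));
```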
diff --git a/src/main/java/object_detection/types/CameraIntrinsics.java b/src/main/java/object_detection/types/CameraIntrinsics.java
index b92eb85f7a70e644792c2a4f3cd81b8c8cd73941..b6e0fdc5670076d0082275e87e18a0fcf28a2437 100644
--- a/src/main/java/object_detection/types/CameraIntrinsics.java
+++ b/src/main/java/object_detection/types/CameraIntrinsics.java
@@ -46,4 +46,8 @@ public class CameraIntrinsics {
     float[] ImageSize;
     DMatrixRMaj K;
 
+
+    public static void main(String[] args) throws FileNotFoundException {
+        CameraIntrinsics intrinsics = new CameraIntrinsics("/Users/roku/IdeaProjects/group8/src/main/java/vslam/CameraIntrinsics.csv");
+    }
 }
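Editor's note: the smoke test added above hardcodes an absolute path under `/Users/roku`. A repository-relative path, matching the paths used elsewhere in this diff, keeps it runnable for other contributors (a sketch, assuming the working directory is the repository root):

```java
public static void main(String[] args) throws FileNotFoundException {
    // Repository-relative path, as used by the rest of the code base.
    CameraIntrinsics intrinsics =
            new CameraIntrinsics("src/main/java/vslam/CameraIntrinsics.csv");
}
```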
diff --git a/src/main/java/object_detection/types/Frame.java b/src/main/java/object_detection/types/Frame.java
index 6cd374c2b9a3e136d34872137f288d4287260980..f663d6bb9140c0f0d2d0bd147d00edb7896dcba4 100644
--- a/src/main/java/object_detection/types/Frame.java
+++ b/src/main/java/object_detection/types/Frame.java
@@ -73,9 +73,9 @@ public class Frame {
 
             // check if point is visible in current frame
             if(projPoint.get(0,2) > 0){
-                if(x_div < intrinsics.ImageSize[1]
+                if(x_div <= intrinsics.ImageSize[1]
                     && x_div >= 0
-                    && y_div < intrinsics.ImageSize[0]
+                    && y_div <= intrinsics.ImageSize[0]
                     && y_div >= 0){
                     res.put(i, new Point2D((float) x_div, (float) y_div));
                 }
@@ -93,4 +93,14 @@ public class Frame {
     List<BoundingBox2D> boxes;
     CameraPose camera;
 
+
+    public static void main(String[] args) throws IOException, CsvValidationException {
+        System.out.println("Trying projection:");
+        CameraIntrinsics ci = new CameraIntrinsics("src/main/java/vslam/CameraIntrinsics.csv");
+        List<Point> pc = Downsampler.get_voxels("src/main/java/vslam/pointcloud.csv", 0.05F);
+
+        CameraPose cp = new CameraPose("src/main/java/vslam/CameraPoses/Pose_0002.csv");
+        Frame f = new Frame("src/main/java/vslam/BoundedInfo/KeyFrame_0002.csv", cp);
+
+    }
 }
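Editor's note on the `<` to `<=` change above: with `<=`, a projected coordinate exactly equal to the image size passes the visibility check, yet ObjectSet allocates its bitmap as `new int[ImageSize[0]][ImageSize[1]]`, where that index is out of range. If `x_div`/`y_div` are later used as indices (an assumption based on that allocation), exclusive upper bounds are the safer form; a sketch of the predicate as a helper:

```java
// Exclusive upper bounds: valid pixel indices run 0 .. size-1.
static boolean inImage(double x, double y, float[] imageSize) {
    return x >= 0 && x < imageSize[1]   // column index against image width
        && y >= 0 && y < imageSize[0];  // row index against image height
}
```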
diff --git a/src/main/java/object_detection/types/ObjectSet.java b/src/main/java/object_detection/types/ObjectSet.java
index 3dd404e0661c4191614fcd3b231e38ff64c44920..ea5f776c388875f137f0de983e8026310ee31aee 100644
--- a/src/main/java/object_detection/types/ObjectSet.java
+++ b/src/main/java/object_detection/types/ObjectSet.java
@@ -32,6 +32,8 @@ public class ObjectSet {
         // 2) Create a bitmap that holds the index of the bounding box that contains the pixel at bitmap[i][j]
         int[][] bitmap = new int[(int) intrinsics.ImageSize[0]][(int) intrinsics.ImageSize[1]];
 
+        System.out.println(bitmap.length);
+        System.out.println(bitmap[0].length);
         for(int b = 0; b < f.boxes.size(); b++){
             BoundingBox2D bbox = f.boxes.get(b);
             // for each box, write to all points in bitmap that fall in box
@@ -59,7 +61,7 @@ public class ObjectSet {
         // 4) Now that we have a full list of candidate objects, we can do object resolution by combining candidates with overlapping points
         for(PointSet c : candidates){
             if(c.pset.size() > 2){
-                this.reconcileCandidate(c, 0.7);
+                this.reconcileCandidate(c, 0.8);
             }
         }
 
@@ -74,14 +76,18 @@ public class ObjectSet {
             // calculate overlapping points, if beyond some threshold, find intersection of the sets
             int count = 0;
             for (Point p : obj.pset) {
-                if(c.pset.contains(p)){
-                    count++;
+                for (Point cp : c.pset) {
+                    if (Point.equals(p, cp, 0.0001F)) {
+                        count++;
+                        break;
+                    }
                 }
             }
 
             if (count > (c.pset.size() * threshold)) {
                 //combine via intersection and return, since we know that we just combined those objects
                 obj.pset.retainAll(c.pset);
+                System.out.println("reconciling");
                 return;
             }
         }
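Editor's note: replacing `contains` with an epsilon comparison makes the overlap count above O(|obj| × |c|) per candidate. If that becomes a bottleneck, quantizing coordinates by the epsilon lets a HashSet answer matches in roughly linear time. A hedged sketch (the record `P` stands in for the repository's Point, whose field names are assumed; cell matching is approximate, since two points within epsilon can still straddle a cell boundary):

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class OverlapSketch {
    static final float EPS = 0.0001f;

    record P(float x, float y, float z) {}

    // Quantize each coordinate to an EPS-sized grid cell.
    static String cell(P p) {
        return Math.round(p.x() / EPS) + ":" + Math.round(p.y() / EPS)
                + ":" + Math.round(p.z() / EPS);
    }

    // Count candidate points that land in the same cell as some object point.
    static int countOverlap(List<P> obj, List<P> candidate) {
        Set<String> cells = new HashSet<>();
        for (P p : obj) cells.add(cell(p));
        int count = 0;
        for (P q : candidate) if (cells.contains(cell(q))) count++;
        return count;
    }
}
```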
diff --git a/src/main/java/top/BackendJava.java b/src/main/java/top/BackendJava.java
index b5cb9aa9833128eedc69bd04bcf4f86d85ea8857..e7c796bdbceae05b730da8e25a4a1b998543f0f8 100644
--- a/src/main/java/top/BackendJava.java
+++ b/src/main/java/top/BackendJava.java
@@ -36,11 +36,24 @@ public class BackendJava {
         private final MongoDBInteraction dbInteraction = new MongoDBInteraction();
         private final Gson gson = new GsonBuilder().create();
 
+        @GetMapping("/{index}")
+        public ResponseEntity<String> getObjectSet(@PathVariable int index) {
+            try {
+                ObjectSet objectSet = dbInteraction.retrieveLatestObjectSet();
+                if (objectSet != null) {
+                    return ResponseEntity.ok(gson.toJson(objectSet));
+                } else {
+                    return ResponseEntity.notFound().build();
+                }
+            } catch (NumberFormatException e) {
+                return ResponseEntity.badRequest().body("{\"error\":\"Invalid index format\"}");
+            } catch (Exception e) {
+                return ResponseEntity.internalServerError().body("{\"error\":\"Internal Server Error: " + e.getMessage() + "\"}");
+            }
+        }
+
         @GetMapping("/getObjects")
-        public ResponseEntity<String> getObjects(@RequestParam String dataset) throws CsvValidationException, IOException {
-            System.out.println("==========================");
-            ObjectDetector.startProcess(dataset);
-            System.out.println("==========================");
+        public ResponseEntity<String> getObjects() {
             try {
                 ObjectSet objectSet = dbInteraction.retrieveLatestObjectSet();
                 if (objectSet != null && objectSet.objects != null && !objectSet.objects.isEmpty()) {
@@ -74,6 +87,19 @@ public class BackendJava {
                 return ResponseEntity.internalServerError().body("{\"error\":\"Failed to retrieve all objects: " + e.getMessage() + "\"}");
             }
         }
+
+
+
+        @GetMapping("/getPointCloud")
+        public ResponseEntity<String> getPoints() {
+            try {
+                Map<String, List<String>> result = new HashMap<>();
+                result.put("yes", Collections.singletonList("true"));
+                return ResponseEntity.ok(gson.toJson(result));
+            } catch (Exception e){
+                return ResponseEntity.internalServerError().body("{\"error\":\"Failed to retrieve all objects: " + e.getMessage() + "\"}");
+            }
+        }
     }
 
     @Controller
@@ -82,6 +108,12 @@ public class BackendJava {
         private final MongoDBInteraction dbInteraction = new MongoDBInteraction();
         private final Gson gson = new GsonBuilder().create();
 
+        @RequestMapping("/hello")
+        @ResponseBody
+        public String hello() {
+            return "Hello There";
+        }
+
         @RequestMapping("/")
         public String index() {
             return "html/index";
@@ -122,6 +154,16 @@ public class BackendJava {
             return "style/main.css";
         }
 
+        @RequestMapping("/js/app.js")
+        public String getApp() {
+            return "js/app.js";
+        }
+
+        @RequestMapping("/js/pointCloud.js")
+        public String getPC() {
+            return "js/pointCloud.js";
+        }
+
         @RequestMapping("/js/buildPC.js")
         public String getBuild() {
             return "js/buildPC.js";
diff --git a/src/main/java/vid2frames/Vid2Frames.java b/src/main/java/vid2frames/Vid2Frames.java
index c9b189bd483bfa0ffc17751c3c972c3edc630713..44c8cc76da01bd0af5aa19e47407abf8910e4ad2 100644
--- a/src/main/java/vid2frames/Vid2Frames.java
+++ b/src/main/java/vid2frames/Vid2Frames.java
@@ -28,19 +28,19 @@ public class Vid2Frames {
 
             Java2DFrameConverter converter = new Java2DFrameConverter();
             Frame frame;
-            int frameNumber = 0;
+            int frameNumber = 0; // Overall frame number
+            int savedFrameNumber = 0; // Number of frames actually saved
 
             // Frame rate of the video file
             double frameRate = frameGrabber.getFrameRate();
-            // Interval to capture the frame (every 0.2 seconds for 5 frames per second)
-            int frameInterval = (int) Math.round(frameRate / 5);
-            int savedFrameNumber = 0; // Number of frames actually saved
+            // Interval to capture the frame (e.g., frameRate / 15 keeps roughly 15 frames per second)
+            int frameInterval = (int) Math.round(frameRate / 15);
 
             while ((frame = frameGrabber.grabFrame()) != null) {
                 if (frame.image != null) {
                     if (frameNumber % frameInterval == 0) {
                         BufferedImage bi = converter.convert(frame);
-                        String path = String.format("%s/frame_%d.png", outputDirPath, frameNumber);
+                        String path = String.format("%s/frame_%d.png", outputDirPath, savedFrameNumber);
                         ImageIO.write(bi, "png", new File(path));
                         System.out.println("Saved: " + path);
                         savedFrameNumber++;
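Editor's note: worked arithmetic for the sampling change above, with a 30 fps source assumed for illustration. The diff also fixes the saved filenames to use `savedFrameNumber`, so output frames are numbered contiguously rather than by their position in the source video.

```java
public class FrameIntervalExample {
    public static void main(String[] args) {
        double frameRate = 30.0;                            // assumed source fps
        int oldInterval = (int) Math.round(frameRate / 5);  // 6 -> keep every 6th frame (~5 saved/s)
        int newInterval = (int) Math.round(frameRate / 15); // 2 -> keep every 2nd frame (~15 saved/s)
        System.out.println(oldInterval + " vs " + newInterval);
    }
}
```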
diff --git a/src/main/java/vslam/imperial_london/CameraPoses/Pose_0001.csv b/src/main/java/vslam/imperial_london/CameraPoses/Pose_0001.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ee64b6d45d1da701f12650efc684763bf9796c5c
--- /dev/null
+++ b/src/main/java/vslam/imperial_london/CameraPoses/Pose_0001.csv
@@ -0,0 +1,4 @@
+-0,-0,-0
+1,0,0
+0,1,0
+0,0,1
diff --git a/src/main/java/vslam/tum_rgbd/.DS_Store b/src/main/java/vslam/tum_rgbd/.DS_Store
index 764b3f2a720f236b901a4bb88f0cfa0ea685aa07..eb50baa3790ceac8cd83262e4e5489a4c20e03a1 100644
Binary files a/src/main/java/vslam/tum_rgbd/.DS_Store and b/src/main/java/vslam/tum_rgbd/.DS_Store differ
diff --git a/src/main/java/vslam/tum_rgbd/CameraPoses/Pose_0001.csv b/src/main/java/vslam/tum_rgbd/CameraPoses/Pose_0001.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ee64b6d45d1da701f12650efc684763bf9796c5c
--- /dev/null
+++ b/src/main/java/vslam/tum_rgbd/CameraPoses/Pose_0001.csv
@@ -0,0 +1,4 @@
+-0,-0,-0
+1,0,0
+0,1,0
+0,0,1
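Editor's note: both new Pose_0001.csv files encode a camera at the origin, reading the first row as the translation vector and the next three rows as an identity rotation matrix. That layout is inferred from the file contents; the repository's CameraPose reader may interpret it differently. A hedged parsing sketch under that assumption:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class PoseCsvSketch {
    public static void main(String[] args) throws IOException {
        List<String> rows = Files.readAllLines(
                Path.of("src/main/java/vslam/tum_rgbd/CameraPoses/Pose_0001.csv"));
        String[] t = rows.get(0).split(",");   // translation row: -0,-0,-0
        double[][] r = new double[3][3];       // rotation rows: identity
        for (int i = 0; i < 3; i++) {
            String[] c = rows.get(i + 1).split(",");
            for (int j = 0; j < 3; j++) r[i][j] = Double.parseDouble(c[j]);
        }
        System.out.printf("t=(%s,%s,%s), r00=%.0f%n", t[0], t[1], t[2], r[0][0]);
    }
}
```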
diff --git a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png
index 65ee37f540c18af3b54228783d32733f61e3134f..58c881b4f2aadfefdc80552456fb104029c7c9e8 100644
Binary files a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png and b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png differ
diff --git a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png
index 912808f6640c610c6c42ff41a9054ccf3599b641..73b1bbf1d68d8400b2d32b671799228ea3284178 100644
Binary files a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png and b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png differ
diff --git a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png
index 453267a487af8297aff893a965e75a9b536e1014..aaf32f31fdade81a541dae25cbe7ad80bac1900a 100644
Binary files a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png and b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png differ
diff --git a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png
index 7b9a94ffa544f2f6871ed57279812ef71c32f527..f02c1f3bdb946b489fd4ab1e09112e01aec1992e 100644
Binary files a/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png and b/src/main/java/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png differ
diff --git a/src/main/java/vslam/vslam_implementation.m b/src/main/java/vslam/vslam_implementation.m
index b398faf3de7a4c297137388d1f96bdb13d45cd02..43e5975b6eba12f47ccb27b3aa1bb8a353148d0e 100644
--- a/src/main/java/vslam/vslam_implementation.m
+++ b/src/main/java/vslam/vslam_implementation.m
@@ -4,7 +4,6 @@ function vslam_implementation(dataset_name)
 
     % Define the base folder for datasets
     datasetFolder = [dataset_name, '/'];
-<<<<<<< HEAD
 
     imageFolder = [datasetFolder, 'frames/'];
     
@@ -19,28 +18,7 @@ function vslam_implementation(dataset_name)
     save([datasetFolder 'worldPointSetOutput.mat'], 'worldPointSetOutput');
 
     % Iterate over KeyFrames and execute extractPointsByViewId
-<<<<<<< HEAD
-    keyFramesDir = './KeyFrames';
-=======
-
-    imageFolder = [datasetFolder, 'frames/'];
-    
-    %imageFolder = datasetFolder;
-    %imds = imageDatastore(imageFolder);
-    imds = createSortedImds(imageFolder);
-
-    % Process the image sequence
-    [worldPointSetOutput] = ProcessImageSequence(imds, dataset_name);
-
-    % Save the outputs to .mat files within the dataset specific folder
-    save([datasetFolder 'worldPointSetOutput.mat'], 'worldPointSetOutput');
-
-    % Iterate over KeyFrames and execute extractPointsByViewId
-    keyFramesDir = [dataset_name, '/KeyFrames'];
->>>>>>> divider
-=======
     keyFramesDir = [dataset_name, '/KeyFrames'];
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
     keyFrameFiles = dir(fullfile(keyFramesDir, 'KeyFrame_*.png'));
 
     for i = 1:length(keyFrameFiles)
@@ -361,60 +339,6 @@ function worldPointSetOutput = ProcessImageSequence(imds, dataset_name)
                 MaxIteration=10);
         
     
-<<<<<<< HEAD
-<<<<<<< HEAD
-        % Refine local key frames and map points
-        [mapPointSet, vSetKeyFrames, mapPointIdx] = bundleAdjustment(...
-            mapPointSet, vSetKeyFrames, [refinedKeyFrameIds; currKeyFrameId], intrinsics, ...
-            FixedViewIDs=fixedViewIds, PointsUndistorted=true, AbsoluteTolerance=1e-7,...
-            RelativeTolerance=1e-16, Solver="preconditioned-conjugate-gradient", ...
-            MaxIteration=10);
-    
-        % Update view direction and depth
-        mapPointSet = updateLimitsAndDirection(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-    
-        % Update representative view
-        mapPointSet = updateRepresentativeView(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-    
-        % Check if the KeyFrames directory exists; if not, create it
-        keyFramesDir = './KeyFrames_Mono';
-        if ~exist(keyFramesDir, 'dir')
-            mkdir(keyFramesDir);
-        end
-
-        % Store feature locations for this key frame
-        keyFramePointsDir = './KeyFramePoints_Mono';
-=======
-            % Update view direction and depth
-            mapPointSet = updateLimitsAndDirection(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-        
-            % Update representative view
-            mapPointSet = updateRepresentativeView(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-        else
-            disp('Skipping bundle adjustment due to lack of refined key frames.');
-        end
-        
-        % % Check if the KeyFrames directory exists; if not, create it
-        % keyFramesDir = './KeyFrames_Mono';
-        % if ~exist(keyFramesDir, 'dir')
-        %     mkdir(keyFramesDir);
-        % end
-        % 
-        % % Store feature locations for this key frame
-        % keyFramePointsDir = './KeyFramePoints_Mono';
-        % if ~exist(keyFramePointsDir, 'dir')
-        %     mkdir(keyFramePointsDir); 
-        % end
-
-        keyFramesDir = [dataset_name, '/KeyFrames'];
-        keyFramePointsDir = [dataset_name, '/KeyFramePoints'];
-
-        % Ensure directories are created
-        if ~exist(keyFramesDir, 'dir')
-            mkdir(keyFramesDir);
-        end
->>>>>>> divider
-=======
             % Update view direction and depth
             mapPointSet = updateLimitsAndDirection(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
         
@@ -443,7 +367,6 @@ function worldPointSetOutput = ProcessImageSequence(imds, dataset_name)
         if ~exist(keyFramesDir, 'dir')
             mkdir(keyFramesDir);
         end
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
         if ~exist(keyFramePointsDir, 'dir')
             mkdir(keyFramePointsDir);
         end
diff --git a/src/main/java/vslam/vslam_implementation_rgbd.m b/src/main/java/vslam/vslam_implementation_rgbd.m
index 74edc4ab21bcc0ad33fb673e4d5ec34c672cf243..b32fa8d4700cd7be40e269ab25361ddb6aaf16e6 100755
--- a/src/main/java/vslam/vslam_implementation_rgbd.m
+++ b/src/main/java/vslam/vslam_implementation_rgbd.m
@@ -131,23 +131,9 @@ function vslam_implementation_rgbd(dataset_name)
     end
 
     % Process the image sequence
-<<<<<<< HEAD
-<<<<<<< HEAD
-    [worldPointSetOutput, optimizedPoses, pointCloudsAll] = ProcessImageSequence(imdsColor, imdsDepth);
-
-    % Save the outputs to .mat files
-    save('worldPointSetOutput.mat', 'worldPointSetOutput');
-    save('optimizedPoses.mat', 'optimizedPoses');
-    save('pointCloudsAll.mat', 'pointCloudsAll');
-=======
-    [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth);
-
-    % Save the outputs to .mat files
-=======
     [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth);
 
     % Save the outputs to .mat files
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
     % save('worldPointSetOutput.mat', 'worldPointSetOutput');
     % save('optimizedPoses.mat', 'optimizedPoses');
     % save('pointCloudsAll.mat', 'pointCloudsAll');
@@ -157,10 +143,6 @@ function vslam_implementation_rgbd(dataset_name)
     savePosesToCSV(optimizedPoses, 'CameraPoses');
     savePointCloudToCSV(pointCloudsAll);
     saveIntrinsicsToCSV(intrinsics);
-<<<<<<< HEAD
->>>>>>> divider
-=======
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
 
     % Iterate over KeyFrames and execute extractPointsByViewId
     keyFramesDir = './KeyFrames';
@@ -175,28 +157,12 @@ function vslam_implementation_rgbd(dataset_name)
 end
 
 
-<<<<<<< HEAD
-<<<<<<< HEAD
-function [worldPointSetOutput, optimizedPoses, pointCloudsAll] = ProcessImageSequence(imdsColor, imdsDepth)
-=======
 function [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth)
->>>>>>> divider
-=======
-function [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth)
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
     % Inspect the first RGB-D image
     currFrameIdx  = 1;
     currIcolor    = readimage(imdsColor, currFrameIdx);
     currIdepth    = readimage(imdsDepth, currFrameIdx);
-<<<<<<< HEAD
-<<<<<<< HEAD
-    imshowpair(currIcolor, currIdepth, "montage");
-=======
-    % imshowpair(currIcolor, currIdepth, "montage");
->>>>>>> divider
-=======
     % imshowpair(currIcolor, currIdepth, "montage");
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
     
     % Map Initialization: The pipeline starts by initializing the map that holds 3-D world points. 
     % This step is crucial and has a significant impact on the accuracy of the final SLAM result. 
@@ -655,11 +621,6 @@ function saveKeyFramePoints(dirPath, keyFrameId, featurePoints, mapPointsIdx)
     % Write the combined data to a CSV file
     writematrix(dataMatrix, csvFilename);
 end
-<<<<<<< HEAD
-<<<<<<< HEAD
-=======
-=======
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
 
 
 function savePointCloudToCSV(pointCloudsAll)
@@ -693,9 +654,4 @@ function saveIntrinsicsToCSV(intrinsics)
         intrinsics.K];
 
     writematrix(mat, 'CameraIntrinsics.csv')
-<<<<<<< HEAD
-end
->>>>>>> divider
-=======
-end
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
+end
\ No newline at end of file
diff --git a/src/main/java/yolo/YOLONet.java b/src/main/java/yolo/YOLONet.java
index 599061d32a9f2c7470fc58014ea7e86baf429418..7a590fef9fd0b04c8bdfe2f575c047bab61e088a 100644
--- a/src/main/java/yolo/YOLONet.java
+++ b/src/main/java/yolo/YOLONet.java
@@ -132,7 +132,14 @@ public class YOLONet {
             result.height = box.height();
             detections.add(result);
         }
-
+    
+        // Clean up
+        // indices.release();
+        // confidencesPointer.release();
+        // classIds.release();
+        // confidences.release();
+        // boxes.release();
+    
         return detections;
     }
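Editor's note: the cleanup calls above are left commented out. If the intent is deterministic release of the JavaCV/JavaCPP native allocations, one option is JavaCPP's PointerScope, which deallocates pointers created inside the block when it closes. A sketch only, not the project's current approach:

```java
import org.bytedeco.javacpp.PointerScope;

// Inside detect(...): pointers allocated while the scope is open are freed
// when it closes, removing the need for manual release calls.
try (PointerScope scope = new PointerScope()) {
    // ... allocate indices, confidences, classIds, boxes; run the network ...
    // Build and return plain-Java Detection objects before the scope closes.
}
```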
     
diff --git a/src/main/resources/templates/js/buildPC.js b/src/main/resources/templates/js/buildPC.js
index 195dbe1f718e39001bd74981f8af439d0aa1c570..d3822bacde295c4047fc090b5ecfcf5e3908717c 100644
--- a/src/main/resources/templates/js/buildPC.js
+++ b/src/main/resources/templates/js/buildPC.js
@@ -17,7 +17,7 @@ async function render(event) {
     // start loading
     displayLoading();
     console.log("Start");
-    d3.json(`http://127.0.0.1:5555/api/getObjects?dataset=${dataset_name}`, function(data){
+    d3.json(`http://127.0.0.1:5555/getJSON?dataset=${dataset_name}`, function(data){
         console.log(data);
         objectSet = data;
 
diff --git a/target/classes/application.properties b/target/classes/application.properties
deleted file mode 100644
index 4a5c2635a3e493971d146497da0075134f6914fa..0000000000000000000000000000000000000000
--- a/target/classes/application.properties
+++ /dev/null
@@ -1,2 +0,0 @@
-server.port = 5555
-#spring.data.mongodb.uri = mongodb+srv://zanem:<YXQiSFkSVqxPTs3M>@cluster0.axhv9kg.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0
\ No newline at end of file
diff --git a/target/classes/database/MongoDBInteraction.class b/target/classes/database/MongoDBInteraction.class
deleted file mode 100644
index 4bab3d8f703e89ae547400441d7dcca7d0d984af..0000000000000000000000000000000000000000
Binary files a/target/classes/database/MongoDBInteraction.class and /dev/null differ
diff --git a/target/classes/object_detection/.DS_Store b/target/classes/object_detection/.DS_Store
deleted file mode 100644
index aff5713ce0a01c7592b2fac9d9fad1c285ba7641..0000000000000000000000000000000000000000
Binary files a/target/classes/object_detection/.DS_Store and /dev/null differ
diff --git a/target/classes/object_detection/ObjectDetector.class b/target/classes/object_detection/ObjectDetector.class
deleted file mode 100644
index 1fcdd437b03a2a5e776cc12801f0f3a4d7307591..0000000000000000000000000000000000000000
Binary files a/target/classes/object_detection/ObjectDetector.class and /dev/null differ
diff --git a/target/classes/object_detection/types/CameraIntrinsics.class b/target/classes/object_detection/types/CameraIntrinsics.class
deleted file mode 100644
index 301840c4d78a2516722639ce54db6063abeaeeba..0000000000000000000000000000000000000000
Binary files a/target/classes/object_detection/types/CameraIntrinsics.class and /dev/null differ
diff --git a/target/classes/object_detection/types/Frame.class b/target/classes/object_detection/types/Frame.class
deleted file mode 100644
index e9555c634014129015cfd277daa31b2234cf20c0..0000000000000000000000000000000000000000
Binary files a/target/classes/object_detection/types/Frame.class and /dev/null differ
diff --git a/target/classes/object_detection/types/ObjectSet.class b/target/classes/object_detection/types/ObjectSet.class
deleted file mode 100644
index ee65fa9af7d54afc9a94db87cb8ce73a6234d188..0000000000000000000000000000000000000000
Binary files a/target/classes/object_detection/types/ObjectSet.class and /dev/null differ
diff --git a/target/classes/templates/html/index.html b/target/classes/templates/html/index.html
deleted file mode 100644
index 95e78fb7701b7445ef7a55d688af4993ace6a352..0000000000000000000000000000000000000000
--- a/target/classes/templates/html/index.html
+++ /dev/null
@@ -1,56 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-
-<head>
-    <meta charset="UTF-8">
-    <title>VSlam</title>
-    <link rel="stylesheet" href="../style/main.css">
-    <!-- Include three.js -->
-    <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
-    <script src='https://cdn.plot.ly/plotly-2.31.1.min.js'></script>
-    <script src='https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.min.js'></script>
-</head>
-
-<body>
-
-<h1>Integrating RGBD-VSLAM with Object Detection and Tracking</h1>
-<p> Visual Simultaneous Localization and Mapping (VSLAM) is the process of taking camera feed, as well as its position, and building a map of the current local world, specifically using visual input. This project uses this process, and builds upon it by also tracking objects within a frame. In this comes two problems: object detection, and then subsequent mapping and tracking of objects within a 3D space. For more information go <a href="https://agile.bu.edu/gitlab/ec504/ec504_projects/group8/-/blob/master/README.md?ref_type=heads">here</a></p>
-<p>These are the steps to this process:</p>
-<ol>
-    <li>Taking a video source as input, start the VSLAM algorithm to iteratively build a worldmap</li>
-    <li>Save the finalized and error-corrected worldmap, as well as the camera position for each important frame</li>
-    <li>Use YOLOv4 ConvNet on each frame, a model that returns 2D bounding boxes around objects found within an image</li>
-    <li>Project the 3D points collected from VSLAM onto each frame</li>
-    <li>For each 2D bounding box, create a potential object containing all 3D points that are projected within its bounds</li>
-    <li>Check if there is overlap in this set of points when comparing to past objects from other frames</li>
-    <li>Given sufficient overlap, combine these objects using intersection on the points</li>
-</ol>
-
-<p>Given some video of an environment, the output of this algorithm is essentially 3D groupings of points that correspond to objects within that environment.</p>
-
-<h3>Select a dataset from the dropdown below. It will take some time to process the dataset.</h3>
-
-<select id="render_form">
-    <option value="none" selected disabled hidden>Select an Option</option>
-    <option value="1">Office 1</option>
-    <option value="2">Office 2</option>
-    <option value="3">Office 3</option>
-</select>
-
-<div id="loading"></div>
-<div class="container" id="resultContainer">
-    <h5>Select An Object To View Point Cloud With That Object Highlighted</h5>
-    <div id="objectList">
-        <!-- this is where object buttons will appear after rendering -->
-    </div>
-</div>
-
-<div id="myDiv"></div>
-<script src="../js/buildPC.js"></script>
-</body>
-
-</html>
-
-
-
-
diff --git a/target/classes/templates/js/app.js b/target/classes/templates/js/app.js
deleted file mode 100644
index 2ac1825de39da991bfba1b760d4678512b9a04f1..0000000000000000000000000000000000000000
--- a/target/classes/templates/js/app.js
+++ /dev/null
@@ -1,71 +0,0 @@
-async function fetchImageAndObjects() {
-    try {
-        const response = await fetch('http://127.0.0.1:5555/api/getObjects');
-        if (!response.ok) {
-            throw new Error('Failed to fetch image and objects');
-        }
-        const data = await response.json();
-
-        displayImageAndObjects(data.objects);
-    } catch (error) {
-        console.error(error);
-    }
-}
-
-function displayImageAndObjects(objects) {
-    const objectsContainer = document.getElementById('objectsContainer');
-    const container = document.getElementById('resultContainer');
-
-    // Create a list for objects
-    const objectsList = document.createElement('ul');
-    objects.forEach(object => {
-        const objectButton = document.createElement('button');
-        objectButton.textContent = object;
-        // Add event listener to each button
-        objectButton.addEventListener('click', () => {
-            // You can define what happens when the button is clicked
-            drawPointCloud(object);
-        });
-        const objectItem = document.createElement('li');
-        objectItem.appendChild(objectButton);
-        objectsList.appendChild(objectItem);
-    });
-
-    // Append objects list to the objects container
-    objectsContainer.appendChild(objectsList);
-
-    container.style.visibility = "visible";
-}
-
-
-// selecting loading div
-const loader = document.querySelector("#loading");
-
-// showing loading
-function displayLoading() {
-    loader.classList.add("display");
-}
-
-// hiding loading 
-function hideLoading() {
-    loader.classList.remove("display");
-}
-
-async function startWorkflow(){
-    const startButton = document.getElementById("process");
-    startButton.style.display = "none";
-    displayLoading();
-    try {
-        const response = await fetch('http://127.0.0.1:5555/runProcess');
-        if (!response.ok) {
-            throw new Error('Failed to fetch image and objects');
-        }
-        const data = await response;
-        console.log(data);
-        hideLoading();
-    } catch (error) {
-        console.error(error);
-    }
-    fetchImageAndObjects();
-    
-}
diff --git a/target/classes/templates/js/buildPC.js b/target/classes/templates/js/buildPC.js
deleted file mode 100644
index 195dbe1f718e39001bd74981f8af439d0aa1c570..0000000000000000000000000000000000000000
--- a/target/classes/templates/js/buildPC.js
+++ /dev/null
@@ -1,117 +0,0 @@
-var renderForm = document.getElementById("render_form");
-renderForm.addEventListener("change", render);
-
-// selecting loading div
-const loader = document.getElementById("loading");
-hideLoading();
-
-var objectSet = []
-var traces = []
-var meshes = []
-var highlighted = false;
-
-async function render(event) {
-
-    var dataset_name = renderForm.value;
-
-    // start loading
-    displayLoading();
-    console.log("Start");
-    d3.json(`http://127.0.0.1:5555/api/getObjects?dataset=${dataset_name}`, function(data){
-        console.log(data);
-        objectSet = data;
-
-        // build object list
-        var i = 0;
-        const container = document.getElementById('resultContainer')
-        const objectList = document.getElementById('objectList');
-        objectSet['objects'].forEach(object => {
-
-            // create a button for that object, and store the index of the trace
-            const objectButton = document.createElement('button');
-            objectButton.textContent = String(i); // text will be the index of the object
-            objectButton.setAttribute("idx", i);
-            objectButton.title = "Object " + String(i);
-            objectButton.className = "myBtn";
-
-            // Add event listener to each button
-            objectButton.addEventListener('click', () => {
-                // remove all meshes from graph, and add current
-                if(highlighted != false){
-                    Plotly.deleteTraces('myDiv', [-1]);
-                }
-                else{
-                    highlighted = true;
-                }
-
-                // get index of current mesh
-                var i = objectButton.getAttribute("idx");
-                Plotly.addTraces('myDiv', [meshes[i]]);
-            });
-
-            objectList.appendChild(objectButton);
-            objectList.style.visibility = "visible";
-            container.style.visibility = "visible";
-
-            // create trace
-            var currTrac = {
-                x: object['points'].map((x) => x['x']),
-                y: object['points'].map((x) => x['y']),
-                z: object['points'].map((x) => x['z']),
-                type: 'scatter3d',
-                mode: 'markers',
-                marker: {
-                    color: object['points'].map((x) => 'rgb(' + x['R'] + ', ' + x['G'] + ', ' + x['B'] + ')'),
-                    size: 3,
-                    width: 0.2
-                }
-            }
-
-            // create mesh
-            var currMesh = {
-                alphahull: 0,
-                opacity: 0.9,
-                type: 'mesh3d',
-                x: object['points'].map((x) => x['x']),
-                y: object['points'].map((x) => x['y']),
-                z: object['points'].map((x) => x['z'])
-            }
-
-            // add object and mesh to lists
-            traces.push(currTrac);
-            meshes.push(currMesh);
-
-            i++;
-        });
-
-
-        var layout = {
-            margin: {
-                l: 0,
-                r: 0,
-                b: 0,
-                t: 0
-            },
-            showlegend: false
-        };
-
-        Plotly.newPlot('myDiv', traces, layout);
-
-        // finish loading
-        hideLoading();
-    });
-
-}
-
-
-
-// showing loading
-function displayLoading() {
-    loader.style.visibility = "visible";
-    console.log("HERE");
-}
-
-// hiding loading
-function hideLoading() {
-    loader.style.visibility = "hidden";
-}
\ No newline at end of file
diff --git a/target/classes/templates/js/pointCloud.js b/target/classes/templates/js/pointCloud.js
deleted file mode 100644
index 205af4b79c4f2681e1cfde4610f564cf2f5c8f99..0000000000000000000000000000000000000000
--- a/target/classes/templates/js/pointCloud.js
+++ /dev/null
@@ -1,140 +0,0 @@
-
-async function drawPointCloud(object) {
-  await fetch('http://127.0.0.1:5555/getJSON')
-    .then(response => response.json())
-    .then(responseText => {
-      let pointCloudData = parseJSONToPointCloud(responseText, object);
-
-      // Use three.js to render the point cloud
-      let scene = new THREE.Scene();
-      let camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
-      let renderer = new THREE.WebGLRenderer();
-      renderer.setSize(window.innerWidth, window.innerHeight);
-      document.body.appendChild(renderer.domElement);
-
-      // Create a buffer geometry and add the point cloud data
-      let geometry = new THREE.BufferGeometry();
-      geometry.setAttribute('position', new THREE.Float32BufferAttribute(new Float32Array(pointCloudData.positions), 3));
-      geometry.setAttribute('color', new THREE.Float32BufferAttribute(new Float32Array(pointCloudData.colors), 3));
-
-      let material = new THREE.PointsMaterial({ vertexColors: true, size: 0.005 });
-
-      let pointCloud = new THREE.Points(geometry, material);
-      scene.add(pointCloud);
-
-      camera.position.z = 2;
-
-      let angleX = 0;
-      let angleY = 0;
-
-      // Handle user interaction for rotation
-      let isDragging = false;
-      let previousMousePosition = {
-        x: 0,
-        y: 0
-      };
-
-      // Add event listeners to renderer's DOM element
-      renderer.domElement.addEventListener("mousedown", (event) => {
-        isDragging = true;
-        previousMousePosition = {
-          x: event.clientX,
-          y: event.clientY
-        };
-      });
-
-      renderer.domElement.addEventListener("mousemove", (event) => {
-        if (isDragging) {
-          let deltaX = event.clientX - previousMousePosition.x;
-          let deltaY = event.clientY - previousMousePosition.y;
-
-          angleY += deltaX * 0.01;
-          angleX += deltaY * 0.01;
-
-          pointCloud.rotation.y = angleY;
-          pointCloud.rotation.x = angleX;
-
-          previousMousePosition = {
-            x: event.clientX,
-            y: event.clientY
-          };
-
-          renderer.render(scene, camera);
-        }
-      });
-
-      renderer.domElement.addEventListener("mouseup", () => {
-        isDragging = false;
-      });
-
-      let zoomFactor = 1; // Initial zoom level
-
-      const zoomSensitivity = 0.01; // Adjust zoom sensitivity as needed
-
-      renderer.domElement.addEventListener("wheel", (event) => {
-        event.preventDefault();
-
-        zoomFactor += event.deltaY * zoomSensitivity;
-        zoomFactor = Math.max(0.1, zoomFactor); // Enforce minimum zoom level
-        camera.position.z = camera.initialPosition.z / zoomFactor;
-        renderer.render(scene, camera);
-      });
-
-      camera.initialPosition = { z: camera.position.z }; // Store initial position
-
-      function animate() {
-        requestAnimationFrame(animate);
-        renderer.render(scene, camera);
-      }
-
-      animate();
-
-      function parseJSONToPointCloud(jsonObject, target) {
-        // Initialize point cloud data
-        let positions = [];
-        let colors = [];
-      
-        // Check if the object name exists in the JSON
-        if (target in jsonObject || target == null) {
-          
-          for (let objectKey in jsonObject) {
-            let object = jsonObject[objectKey]['pset'];
-
-            // Loop through each coordinate in the object
-            for (let i = 0; i < object.length; i++) {
-              let coordinate = object[i];
-              
-              // Extract x, y, z values
-              let x = coordinate['x'];
-              let y = coordinate['y'];
-              let z = coordinate['z'];
-              let r,g,b;
-              
-              if(objectKey == target){
-                console.log(target);
-                r = 1; // Red component
-                g = 0; // Green component
-                b = 0; // Blue component
-              } else {
-                r = 1;
-                g = 1;
-                b = 1;
-              }
-
-              // Push to positions list
-              positions.push(x, y, z );
-              colors.push(r,g,b);
-            }
-          }
-
-        } else {
-          console.error(`Object '${object}' not found in JSON.`);
-        }
-      
-        return { positions: positions, colors: colors };
-      }
-    })
-    .catch(error => {
-      console.error('Error loading JSON:', error);
-    });
-}
\ No newline at end of file
diff --git a/target/classes/templates/style/main.css b/target/classes/templates/style/main.css
deleted file mode 100644
index 2420294cb58f5fdd32087b1070b3faea34697200..0000000000000000000000000000000000000000
--- a/target/classes/templates/style/main.css
+++ /dev/null
@@ -1,94 +0,0 @@
-html {
-  height: 100%;
-}
-
-canvas {
-  display: flex;
-}
-
-h1 {
-  margin-top: 70px;
-}
-
-body {
-  margin-left: 20%;
-  margin-right: 20%;
-}
-
-.myBtn {
-  width:50px;
-  height:25px;
-  margin: 5px;
-}
-
-#resultContainer {
-  visibility: hidden;
-  display: inline;
-}
-
-#objectList {
-  margin-top: 20px;
-  visibility: hidden;
-  display:flex;
-  flex-wrap: wrap;
-  flex-direction: row;
-  justify-content: center;
-}
-
-.container {
-  display: flex;
-  justify-content: space-between;
-}
-
-.image-container {
-  flex: 1;
-  margin-right: 20px;
-}
-
-#myDiv {
-  display: flex !important;
-  justify-content: center !important;
-  padding-bottom: 100px;
-  margin-bottom: 100px;
-}
-
-
-img {
-  max-width: 100%;
-  height: auto;
-}
-
-ul {
-  list-style-type: none;
-  padding-left: 0;
-}
-
-li {
-  margin-bottom: 5px;
-}
-
-/* creating css loader */
-
-#loading {
-  margin: auto;
-  margin-top: 50px;
-  width: 2rem;
-  height: 2rem;
-  border: 5px solid #f3f3f3;
-  border-top: 6px solid #9c41f2;
-  border-radius: 100%;
-  animation: spin 1s infinite linear;
-}
-
-
-@keyframes spin {
-  from {
-    transform: rotate(0deg);
-  }
-  to {
-    transform: rotate(360deg);
-  }
-}
-
-
-
diff --git a/target/classes/top/BackendJava$BackendService.class b/target/classes/top/BackendJava$BackendService.class
deleted file mode 100644
index edc7e712c331d235023829356ecab7e0c8457ecf..0000000000000000000000000000000000000000
Binary files a/target/classes/top/BackendJava$BackendService.class and /dev/null differ
diff --git a/target/classes/top/BackendJava$ObjectSetController.class b/target/classes/top/BackendJava$ObjectSetController.class
deleted file mode 100644
index 15b3a649b9d190075c83488e47295ccc54cd92b0..0000000000000000000000000000000000000000
Binary files a/target/classes/top/BackendJava$ObjectSetController.class and /dev/null differ
diff --git a/target/classes/top/BackendJava$WebMvcConfig.class b/target/classes/top/BackendJava$WebMvcConfig.class
deleted file mode 100644
index 303f5d410b245872f92d6764b121715c39c0cfbb..0000000000000000000000000000000000000000
Binary files a/target/classes/top/BackendJava$WebMvcConfig.class and /dev/null differ
diff --git a/target/classes/top/BackendJava.class b/target/classes/top/BackendJava.class
deleted file mode 100644
index f617de4187e40370d09836b2d40d3fc33b9de32d..0000000000000000000000000000000000000000
Binary files a/target/classes/top/BackendJava.class and /dev/null differ
diff --git a/target/classes/vid2frames/Vid2Frames.class b/target/classes/vid2frames/Vid2Frames.class
deleted file mode 100644
index a109b62e963319a6f11cce794513e4bd5ad6c413..0000000000000000000000000000000000000000
Binary files a/target/classes/vid2frames/Vid2Frames.class and /dev/null differ
diff --git a/target/classes/vslam/tum_rgbd/.DS_Store b/target/classes/vslam/tum_rgbd/.DS_Store
deleted file mode 100644
index 764b3f2a720f236b901a4bb88f0cfa0ea685aa07..0000000000000000000000000000000000000000
Binary files a/target/classes/vslam/tum_rgbd/.DS_Store and /dev/null differ
diff --git a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png b/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png
deleted file mode 100644
index 65ee37f540c18af3b54228783d32733f61e3134f..0000000000000000000000000000000000000000
Binary files a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0022.png and /dev/null differ
diff --git a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png b/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png
deleted file mode 100644
index 912808f6640c610c6c42ff41a9054ccf3599b641..0000000000000000000000000000000000000000
Binary files a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0026.png and /dev/null differ
diff --git a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png b/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png
deleted file mode 100644
index 453267a487af8297aff893a965e75a9b536e1014..0000000000000000000000000000000000000000
Binary files a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0042.png and /dev/null differ
diff --git a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png b/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png
deleted file mode 100644
index 7b9a94ffa544f2f6871ed57279812ef71c32f527..0000000000000000000000000000000000000000
Binary files a/target/classes/vslam/tum_rgbd/KeyFrames/KeyFrame_0046.png and /dev/null differ
diff --git a/target/classes/vslam/vslam_implementation.m b/target/classes/vslam/vslam_implementation.m
deleted file mode 100644
index b398faf3de7a4c297137388d1f96bdb13d45cd02..0000000000000000000000000000000000000000
--- a/target/classes/vslam/vslam_implementation.m
+++ /dev/null
@@ -1,714 +0,0 @@
-function vslam_implementation(dataset_name)
-    % Import dependencies
-    addpath('./Mathworks_VSLAM_Example/');
-
-    % Define the base folder for datasets
-    datasetFolder = [dataset_name, '/'];
-<<<<<<< HEAD
-
-    imageFolder = [datasetFolder, 'frames/'];
-    
-    %imageFolder = datasetFolder;
-    %imds = imageDatastore(imageFolder);
-    imds = createSortedImds(imageFolder);
-
-    % Process the image sequence
-    [worldPointSetOutput] = ProcessImageSequence(imds, dataset_name);
-
-    % Save the outputs to .mat files within the dataset specific folder
-    save([datasetFolder 'worldPointSetOutput.mat'], 'worldPointSetOutput');
-
-    % Iterate over KeyFrames and execute extractPointsByViewId
-<<<<<<< HEAD
-    keyFramesDir = './KeyFrames';
-=======
-
-    imageFolder = [datasetFolder, 'frames/'];
-    
-    %imageFolder = datasetFolder;
-    %imds = imageDatastore(imageFolder);
-    imds = createSortedImds(imageFolder);
-
-    % Process the image sequence
-    [worldPointSetOutput] = ProcessImageSequence(imds, dataset_name);
-
-    % Save the outputs to .mat files within the dataset specific folder
-    save([datasetFolder 'worldPointSetOutput.mat'], 'worldPointSetOutput');
-
-    % Iterate over KeyFrames and execute extractPointsByViewId
-    keyFramesDir = [dataset_name, '/KeyFrames'];
->>>>>>> divider
-=======
-    keyFramesDir = [dataset_name, '/KeyFrames'];
->>>>>>> 197296f30ca7b0c49c8308bf3194348f11ab6a30
-    keyFrameFiles = dir(fullfile(keyFramesDir, 'KeyFrame_*.png'));
-
-    for i = 1:length(keyFrameFiles)
-        filename = keyFrameFiles(i).name;
-        viewId = str2double(regexp(filename, '\d+', 'match', 'once')); % Extracting the numeric part from filename
-        extractPointsByViewId(viewId, worldPointSetOutput);
-    end
-end
-
-
-% function worldPointSetOutput = ProcessImageSequence(imds)
-function worldPointSetOutput = ProcessImageSequence(imds, dataset_name)
-    currFrameIdx = 1;
-    currI = readimage(imds, currFrameIdx);
-
-    %% Map Initilization
-    % Set random seed for reproducibility
-    rng(0);
-    
-    % Create a cameraIntrinsics object to store the camera intrinsic parameters.
-    % The intrinsics for the dataset can be found at the following page:
-    % https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
-    % Note that the images in the dataset are already undistorted, hence there
-    % is no need to specify the distortion coefficients.
-    focalLength    = [535.4, 539.2];    % in units of pixels
-    principalPoint = [320.1, 247.6];    % in units of pixels
-    imageSize      = size(currI,[1 2]);  % in units of pixels
-    intrinsics     = cameraIntrinsics(focalLength, principalPoint, imageSize);
-    
-    % Detect and extract ORB features
-    scaleFactor = 1.2;
-    numLevels   = 8;
-    numPoints   = 1000;
-    [preFeatures, prePoints] = helperDetectAndExtractFeatures(currI, scaleFactor, numLevels, numPoints); 
-    
-    currFrameIdx = currFrameIdx + 1;
-    firstI       = currI; % Preserve the first frame 
-    
-    isMapInitialized  = false;
-    
-    % Map initialization loop
-    while ~isMapInitialized && currFrameIdx < numel(imds.Files)
-        currI = readimage(imds, currFrameIdx);
-    
-        [currFeatures, currPoints] = helperDetectAndExtractFeatures(currI, scaleFactor, numLevels, numPoints); 
-    
-        currFrameIdx = currFrameIdx + 1;
-    
-        % Find putative feature matches
-        indexPairs = matchFeatures(preFeatures, currFeatures, Unique=true, ...
-            MaxRatio=0.9, MatchThreshold=40);
-    
-        % If not enough matches are found, check the next frame
-        minMatches = 100;
-        if size(indexPairs, 1) < minMatches
-            continue
-        end
-    
-        preMatchedPoints  = prePoints(indexPairs(:,1),:);
-        currMatchedPoints = currPoints(indexPairs(:,2),:);
-    
-        % Compute homography and evaluate reconstruction
-        [tformH, scoreH, inliersIdxH] = helperComputeHomography(preMatchedPoints, currMatchedPoints);
-    
-        % Compute fundamental matrix and evaluate reconstruction
-        [tformF, scoreF, inliersIdxF] = helperComputeFundamentalMatrix(preMatchedPoints, currMatchedPoints);
-    
-        % Select the model based on a heuristic
-        ratio = scoreH/(scoreH + scoreF);
-        ratioThreshold = 0.45;
-        if ratio > ratioThreshold
-            inlierTformIdx = inliersIdxH;
-            tform          = tformH;
-        else
-            inlierTformIdx = inliersIdxF;
-            tform          = tformF;
-        end
-    
-        % Computes the camera location up to scale. Use half of the 
-        % points to reduce computation
-        inlierPrePoints  = preMatchedPoints(inlierTformIdx);
-        inlierCurrPoints = currMatchedPoints(inlierTformIdx);
-        [relPose, validFraction] = estrelpose(tform, intrinsics, ...
-            inlierPrePoints(1:2:end), inlierCurrPoints(1:2:end));
-    
-        % If not enough inliers are found, move to the next frame
-        if validFraction < 0.9 || numel(relPose)==3
-            continue
-        end
-
-        % estrelpose can return multiple candidate poses; keep only one
-        if size(relPose, 1) > 1
-            % If relPose is an array, select the first solution
-            relPose = relPose(1);
-        end
-
-    
-        % Triangulate two views to obtain 3-D map points
-        minParallax = 1; % In degrees
-        [isValid, xyzWorldPoints, inlierTriangulationIdx] = helperTriangulateTwoFrames(...
-            rigidtform3d, relPose, inlierPrePoints, inlierCurrPoints, intrinsics, minParallax);
-    
-        if ~isValid
-            continue
-        end
-    
-        % Get the original index of features in the two key frames
-        indexPairs = indexPairs(inlierTformIdx(inlierTriangulationIdx),:);
-    
-        isMapInitialized = true;
-    
-        disp(['Map initialized with frame 1 and frame ', num2str(currFrameIdx-1)])
-    end % End of map initialization loop
-
-    %% Store initial key frames and map points
-    % Create an empty imageviewset object to store key frames
-    vSetKeyFrames = imageviewset;
-    
-    % Create an empty worldpointset object to store 3-D map points
-    mapPointSet   = worldpointset;
-    
-    % Add the first key frame. Place the camera associated with the first 
-    % key frame at the origin, oriented along the Z-axis
-    preViewId     = 1;
-    vSetKeyFrames = addView(vSetKeyFrames, preViewId, rigidtform3d, Points=prePoints,...
-        Features=preFeatures.Features);
-    
-    % Add the second key frame
-    currViewId    = 2;
-    vSetKeyFrames = addView(vSetKeyFrames, currViewId, relPose, Points=currPoints,...
-        Features=currFeatures.Features);
-    
-    % Add connection between the first and the second key frame
-    vSetKeyFrames = addConnection(vSetKeyFrames, preViewId, currViewId, relPose, Matches=indexPairs);
-    
-    % Add 3-D map points
-    [mapPointSet, newPointIdx] = addWorldPoints(mapPointSet, xyzWorldPoints);
-    
-    % Add observations of the map points
-    preLocations  = prePoints.Location;
-    currLocations = currPoints.Location;
-    preScales     = prePoints.Scale;
-    currScales    = currPoints.Scale;
-    
-    % Add image points corresponding to the map points in the first key frame
-    mapPointSet   = addCorrespondences(mapPointSet, preViewId, newPointIdx, indexPairs(:,1));
-    
-    % Add image points corresponding to the map points in the second key frame
-    mapPointSet   = addCorrespondences(mapPointSet, currViewId, newPointIdx, indexPairs(:,2));
-
-    % Initialize place recognition database
-    % Load the bag of features data created offline
-    bofData         = load("bagOfFeaturesDataSLAM.mat");
-    
-    % Initialize the place recognition database
-    loopDatabase    = invertedImageIndex(bofData.bof,SaveFeatureLocations=false);
-    
-    % Add features of the first two key frames to the database
-    addImageFeatures(loopDatabase, preFeatures, preViewId);
-    addImageFeatures(loopDatabase, currFeatures, currViewId);
-
-    %% Refine and visualize initial reconstruction
-    % Run full bundle adjustment on the first two key frames
-    tracks       = findTracks(vSetKeyFrames);
-    cameraPoses  = poses(vSetKeyFrames);
-    
-    [refinedPoints, refinedAbsPoses] = bundleAdjustment(xyzWorldPoints, tracks, ...
-        cameraPoses, intrinsics, FixedViewIDs=1, ...
-        PointsUndistorted=true, AbsoluteTolerance=1e-7,...
-        RelativeTolerance=1e-15, MaxIteration=20, ...
-        Solver="preconditioned-conjugate-gradient");
-
-    % Scale the map and the camera pose using the median depth of map points
-    medianDepth   = median(vecnorm(refinedPoints.'));
-    refinedPoints = refinedPoints / medianDepth;
-    
-    refinedAbsPoses.AbsolutePose(currViewId).Translation = ...
-        refinedAbsPoses.AbsolutePose(currViewId).Translation / medianDepth;
-    relPose.Translation = relPose.Translation/medianDepth;
-    
-    % Update key frames with the refined poses
-    vSetKeyFrames = updateView(vSetKeyFrames, refinedAbsPoses);
-    vSetKeyFrames = updateConnection(vSetKeyFrames, preViewId, currViewId, relPose);
-    
-    % Update map points with the refined positions
-    mapPointSet = updateWorldPoints(mapPointSet, newPointIdx, refinedPoints);
-    
-    % Update view direction and depth 
-    mapPointSet = updateLimitsAndDirection(mapPointSet, newPointIdx, vSetKeyFrames.Views);
-    
-    % Update representative view
-    mapPointSet = updateRepresentativeView(mapPointSet, newPointIdx, vSetKeyFrames.Views);
-    
-    % Visualize matched features in the current frame
-    % close(hfeature.Parent.Parent);
-    featurePlot   = helperVisualizeMatchedFeatures(currI, currPoints(indexPairs(:,2)));
-    
-    % Visualize initial map points and camera trajectory
-    mapPlot       = helperVisualizeMotionAndStructure(vSetKeyFrames, mapPointSet);
-    
-    % Show legend
-    showLegend(mapPlot);
-
-    %% Tracking
-    % ViewId of the current key frame
-    currKeyFrameId   = currViewId;
-    
-    % ViewId of the last key frame
-    lastKeyFrameId   = currViewId;
-    
-    % Index of the last key frame in the input image sequence
-    lastKeyFrameIdx  = currFrameIdx - 1; 
-    
-    % Indices of all the key frames in the input image sequence
-    addedFramesIdx   = [1; lastKeyFrameIdx];
-    
-    isLoopClosed     = false;
-
-    % Main loop (attempt to close loop while iterating over all images in
-    % dataset)
-    isLastFrameKeyFrame = true;
-    while ~isLoopClosed && currFrameIdx < numel(imds.Files)  
-        currI = readimage(imds, currFrameIdx);
-    
-        [currFeatures, currPoints] = helperDetectAndExtractFeatures(currI, scaleFactor, numLevels, numPoints);
-    
-        % Track the last key frame
-        % mapPointsIdx:   Indices of the map points observed in the current frame
-        % featureIdx:     Indices of the corresponding feature points in the 
-        %                 current frame
-        [currPose, mapPointsIdx, featureIdx] = helperTrackLastKeyFrame(mapPointSet, ...
-            vSetKeyFrames.Views, currFeatures, currPoints, lastKeyFrameId, intrinsics, scaleFactor);
-    
-        % Track the local map and check if the current frame is a key frame.
-        % A frame is a key frame if both of the following conditions are satisfied:
-        %
-        % 1. At least 20 frames have passed since the last key frame or the
-        %    current frame tracks fewer than 100 map points.
-        % 2. The map points tracked by the current frame are fewer than 90% of
-        %    points tracked by the reference key frame.
-        %
-        % Tracking performance is sensitive to the value of numPointsKeyFrame.  
-        % If tracking is lost, try a larger value.
-        %
-        % localKeyFrameIds:   ViewId of the connected key frames of the current frame
-        numSkipFrames     = 20;
-        numPointsKeyFrame = 80;
-        [localKeyFrameIds, currPose, mapPointsIdx, featureIdx, isKeyFrame] = ...
-            helperTrackLocalMap(mapPointSet, vSetKeyFrames, mapPointsIdx, ...
-            featureIdx, currPose, currFeatures, currPoints, intrinsics, scaleFactor, numLevels, ...
-            isLastFrameKeyFrame, lastKeyFrameIdx, currFrameIdx, numSkipFrames, numPointsKeyFrame);
-
-    
-        % Visualize matched features
-        updatePlot(featurePlot, currI, currPoints(featureIdx));
-    
-        if ~isKeyFrame
-            currFrameIdx        = currFrameIdx + 1;
-            isLastFrameKeyFrame = false;
-            continue
-        else
-            isLastFrameKeyFrame = true;
-        end
-    
-        % Update current key frame ID
-        currKeyFrameId  = currKeyFrameId + 1;
-    
-        %% Local mapping
-        % Add the new key frame 
-        [mapPointSet, vSetKeyFrames] = helperAddNewKeyFrame(mapPointSet, vSetKeyFrames, ...
-            currPose, currFeatures, currPoints, mapPointsIdx, featureIdx, localKeyFrameIds);
-
-        % Remove outlier map points that are observed in fewer than 3 key frames
-        outlierIdx    = setdiff(newPointIdx, mapPointsIdx);
-        if ~isempty(outlierIdx)
-            mapPointSet   = removeWorldPoints(mapPointSet, outlierIdx);
-        end
-    
-        % Create new map points by triangulation
-        minNumMatches = 10;
-        minParallax   = 3;
-        [mapPointSet, vSetKeyFrames, newPointIdx] = helperCreateNewMapPoints(mapPointSet, vSetKeyFrames, ...
-            currKeyFrameId, intrinsics, scaleFactor, minNumMatches, minParallax);
-    
-        % Local bundle adjustment
-        [refinedViews, dist] = connectedViews(vSetKeyFrames, currKeyFrameId, MaxDistance=2);
-
-        if isempty(refinedViews)
-            disp('No refined views found. Skipping local key frame refinement.');
-            refinedKeyFrameIds = []; % Set to empty if no refined views are available
-        else
-            refinedKeyFrameIds = refinedViews.ViewId;
-        end
-        
-        % Continue only if there are refined key frame IDs
-        if ~isempty(refinedKeyFrameIds)
-            fixedViewIds = refinedKeyFrameIds(dist==2);
-            fixedViewIds = fixedViewIds(1:min(10, numel(fixedViewIds)));
-        
-            % Refine local key frames and map points
-            [mapPointSet, vSetKeyFrames, mapPointIdx] = bundleAdjustment(...
-                mapPointSet, vSetKeyFrames, [refinedKeyFrameIds; currKeyFrameId], intrinsics, ...
-                FixedViewIDs=fixedViewIds, PointsUndistorted=true, AbsoluteTolerance=1e-7,...
-                RelativeTolerance=1e-16, Solver="preconditioned-conjugate-gradient", ...
-                MaxIteration=10);
-
-            % Update view direction and depth
-            mapPointSet = updateLimitsAndDirection(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-
-            % Update representative view
-            mapPointSet = updateRepresentativeView(mapPointSet, mapPointIdx, vSetKeyFrames.Views);
-        else
-            disp('Skipping bundle adjustment due to lack of refined key frames.');
-        end
-
-        keyFramesDir = [dataset_name, '/KeyFrames'];
-        keyFramePointsDir = [dataset_name, '/KeyFramePoints'];
-
-        % Ensure directories are created
-        if ~exist(keyFramesDir, 'dir')
-            mkdir(keyFramesDir);
-        end
-        if ~exist(keyFramePointsDir, 'dir')
-            mkdir(keyFramePointsDir);
-        end
-
-        % Save the current key frame image using currKeyFrameId
-        filename = sprintf('%s/KeyFrame_%04d.png', keyFramesDir, currKeyFrameId);
-        imwrite(currI, filename);
-
-        % Save feature points information
-        saveKeyFramePoints(keyFramePointsDir, currKeyFrameId, currPoints(featureIdx), mapPointsIdx);
-
-
-        % Visualize 3D world points and camera trajectory
-        updatePlot(mapPlot, vSetKeyFrames, mapPointSet);
-
-        %% Loop closure
-        % Check loop closure after some key frames have been created    
-        if currKeyFrameId > 20
-    
-            % Minimum number of feature matches of loop edges
-            loopEdgeNumMatches = 50;
-    
-            % Detect possible loop closure key frame candidates
-            [isDetected, validLoopCandidates] = helperCheckLoopClosure(vSetKeyFrames, currKeyFrameId, ...
-                loopDatabase, currI, loopEdgeNumMatches);
-    
-            if isDetected 
-                % Add loop closure connections
-                [isLoopClosed, mapPointSet, vSetKeyFrames] = helperAddLoopConnections(...
-                    mapPointSet, vSetKeyFrames, validLoopCandidates, currKeyFrameId, ...
-                    currFeatures, loopEdgeNumMatches);
-            end
-        end
-    
-        % If no loop closure is detected, add current features into the database
-        if ~isLoopClosed
-            addImageFeatures(loopDatabase,  currFeatures, currKeyFrameId);
-        end
-    
-        % Update IDs and indices
-        lastKeyFrameId  = currKeyFrameId;
-        lastKeyFrameIdx = currFrameIdx;
-        addedFramesIdx  = [addedFramesIdx; currFrameIdx]; %#ok<AGROW>
-        currFrameIdx    = currFrameIdx + 1;
-    end % End of main loop
-
-    %% Optimizing
-    if isLoopClosed
-        % Optimize the poses
-        minNumMatches      = 20;
-        vSetKeyFramesOptim = optimizePoses(vSetKeyFrames, minNumMatches, Tolerance=1e-16);
-    
-        % Update map points after optimizing the poses
-        mapPointSet = helperUpdateGlobalMap(mapPointSet, vSetKeyFrames, vSetKeyFramesOptim);
-    
-        updatePlot(mapPlot, vSetKeyFrames, mapPointSet);
-    
-        % Plot the optimized camera trajectory
-        optimizedPoses  = poses(vSetKeyFramesOptim);
-        plotOptimizedTrajectory(mapPlot, optimizedPoses)
-    
-        % Update legend
-        showLegend(mapPlot);
-    
-        worldPointSetOutput = mapPointSet;
-    end
-    
-end
-
-
-%% Helper functions
-% The following function definitions are provided in the
-% Mathworks_VSLAM_Example directory:
-% helperAddLoopConnections adds connections between the current key frame and the valid loop candidates.
-% helperAddNewKeyFrame adds key frames to the key frame set.
-% helperCheckLoopClosure detects loop candidate key frames by retrieving visually similar images from the database.
-% helperCreateNewMapPoints creates new map points by triangulation.
-% helperORBFeatureExtractorFunction implements the ORB feature extraction used in bagOfFeatures.
-% helperTrackLastKeyFrame estimates the current camera pose by tracking the last key frame.
-% helperTrackLocalMap refines the current camera pose by tracking the local map.
-% helperVisualizeMatchedFeatures shows the matched features in a frame.
-% helperVisualizeMotionAndStructure shows map points and camera trajectory.
-% helperImportGroundTruth imports camera pose ground truth from the downloaded data.
-
-% helperDetectAndExtractFeatures detects and extracts ORB features from the image.
-function [features, validPoints] = helperDetectAndExtractFeatures(Irgb, ...
-    scaleFactor, numLevels, numPoints, varargin)
-
-    % In this example, the images are already undistorted. In a general
-    % workflow, uncomment the following code to undistort the images.
-    %
-    % if nargin > 4
-    %     intrinsics = varargin{1};
-    % end
-    % Irgb  = undistortImage(Irgb, intrinsics);
-    
-    % Detect ORB features
-    Igray  = im2gray(Irgb);
-    
-    points = detectORBFeatures(Igray, ScaleFactor=scaleFactor, NumLevels=numLevels);
-    
-    % Select a subset of features, uniformly distributed throughout the image
-    points = selectUniform(points, numPoints, size(Igray, 1:2));
-    
-    % Extract features
-    [features, validPoints] = extractFeatures(Igray, points);
-end
-
-% helperComputeHomography compute homography and evaluate reconstruction.
-function [H, score, inliersIndex] = helperComputeHomography(matchedPoints1, matchedPoints2)
-
-    [H, inliersLogicalIndex] = estgeotform2d( ...
-        matchedPoints1, matchedPoints2, "projective", ...
-        MaxNumTrials=1e3, MaxDistance=4, Confidence=90);
-    
-    inlierPoints1 = matchedPoints1(inliersLogicalIndex);
-    inlierPoints2 = matchedPoints2(inliersLogicalIndex);
-    
-    inliersIndex  = find(inliersLogicalIndex);
-    
-    locations1 = inlierPoints1.Location;
-    locations2 = inlierPoints2.Location;
-    xy1In2     = transformPointsForward(H, locations1);
-    xy2In1     = transformPointsInverse(H, locations2);
-    error1in2  = sum((locations2 - xy1In2).^2, 2);
-    error2in1  = sum((locations1 - xy2In1).^2, 2);
-    
-    outlierThreshold = 6;
-    
-    score = sum(max(outlierThreshold-error1in2, 0)) + ...
-        sum(max(outlierThreshold-error2in1, 0));
-end
-
-% helperComputeFundamentalMatrix compute fundamental matrix and evaluate reconstruction.
-function [F, score, inliersIndex] = helperComputeFundamentalMatrix(matchedPoints1, matchedPoints2)
-
-    [F, inliersLogicalIndex]   = estimateFundamentalMatrix( ...
-        matchedPoints1, matchedPoints2, Method="RANSAC",...
-        NumTrials=1e3, DistanceThreshold=4);
-    
-    inlierPoints1 = matchedPoints1(inliersLogicalIndex);
-    inlierPoints2 = matchedPoints2(inliersLogicalIndex);
-    
-    inliersIndex  = find(inliersLogicalIndex);
-    
-    locations1    = inlierPoints1.Location;
-    locations2    = inlierPoints2.Location;
-    
-    % Distance from points to epipolar line
-    lineIn1   = epipolarLine(F', locations2);
-    error2in1 = (sum([locations1, ones(size(locations1, 1),1)].* lineIn1, 2)).^2 ...
-        ./ sum(lineIn1(:,1:2).^2, 2);
-    lineIn2   = epipolarLine(F, locations1);
-    error1in2 = (sum([locations2, ones(size(locations2, 1),1)].* lineIn2, 2)).^2 ...
-        ./ sum(lineIn2(:,1:2).^2, 2);
-    
-    outlierThreshold = 4;
-    
-    score = sum(max(outlierThreshold-error1in2, 0)) + ...
-        sum(max(outlierThreshold-error2in1, 0));
-end
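-
-% Note on model selection: both scoring functions above compute a truncated
-% symmetric transfer error (larger is better, with each point's contribution
-% capped at the outlier threshold). During map initialization, the ratio
-% scoreH/(scoreH + scoreF) > 0.45 favors the homography, which fits planar
-% or low-parallax scenes where the fundamental matrix is poorly constrained.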
-
-% helperTriangulateTwoFrames triangulate two frames to initialize the map.
-function [isValid, xyzPoints, inlierIdx] = helperTriangulateTwoFrames(...
-    pose1, pose2, matchedPoints1, matchedPoints2, intrinsics, minParallax)
-
-    camMatrix1 = cameraProjection(intrinsics, pose2extr(pose1));
-    camMatrix2 = cameraProjection(intrinsics, pose2extr(pose2));
-    
-    [xyzPoints, reprojectionErrors, isInFront] = triangulate(matchedPoints1, ...
-        matchedPoints2, camMatrix1, camMatrix2);
-    
-    % Filter points by view direction and reprojection error
-    minReprojError = 1;
-    inlierIdx  = isInFront & reprojectionErrors < minReprojError;
-    xyzPoints  = xyzPoints(inlierIdx ,:);
-    
-    % A good two-view with significant parallax
-    ray1       = xyzPoints - pose1.Translation;
-    ray2       = xyzPoints - pose2.Translation;
-    cosAngle   = sum(ray1 .* ray2, 2) ./ (vecnorm(ray1, 2, 2) .* vecnorm(ray2, 2, 2));
-    
-    % Check parallax
-    isValid = all(cosAngle < cosd(minParallax) & cosAngle>0);
-end
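-
-% Note: with minParallax = 1 degree (as used at initialization),
-% cosd(minParallax) is about 0.99985, so a point is kept only when its two
-% viewing rays subtend more than 1 degree (cosAngle below that bound) while
-% still pointing in the same general direction (cosAngle > 0).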
-
-% helperEstimateTrajectoryError calculates the tracking error.
-function rmse = helperEstimateTrajectoryError(gTruth, cameraPoses)
-    locations       = vertcat(cameraPoses.AbsolutePose.Translation);
-    gLocations      = vertcat(gTruth.Translation);
-    scale           = median(vecnorm(gLocations, 2, 2))/ median(vecnorm(locations, 2, 2));
-    scaledLocations = locations * scale;
-    
-    rmse = sqrt(mean( sum((scaledLocations - gLocations).^2, 2) ));
-    disp(['Absolute RMSE for key frame trajectory (m): ', num2str(rmse)]);
-end
-
-% helperUpdateGlobalMap updates 3-D locations of map points after pose graph optimization
-function mapPointSet = helperUpdateGlobalMap(...
-    mapPointSet, vSetKeyFrames, vSetKeyFramesOptim)
-    posesOld     = vSetKeyFrames.Views.AbsolutePose;
-    posesNew     = vSetKeyFramesOptim.Views.AbsolutePose;
-    positionsOld = mapPointSet.WorldPoints;
-    positionsNew = positionsOld;
-    indices     = 1:mapPointSet.Count;
-    
-    % Update world location of each map point based on the new absolute pose of 
-    % the corresponding major view
-    for i = indices
-        majorViewIds = mapPointSet.RepresentativeViewId(i);
-        poseNew = posesNew(majorViewIds).A;
-        tform = affinetform3d(poseNew/posesOld(majorViewIds).A);
-        positionsNew(i, :) = transformPointsForward(tform, positionsOld(i, :));
-    end
-    mapPointSet = updateWorldPoints(mapPointSet, indices, positionsNew);
-end
-
-function saveKeyFramePoints(dirPath, keyFrameId, featurePoints, mapPointsIdx)
-    % Ensure the directory exists
-    if ~exist(dirPath, 'dir')
-        mkdir(dirPath);
-    end
-
-    % Define the filename for the CSV file
-    csvFilename = sprintf('%s/KeyFramePoints_%04d.csv', dirPath, keyFrameId);
-
-    % Extract pixel locations from the feature points
-    pixelLocations = featurePoints.Location;  % This should be an Nx2 matrix
-
-    % Combine the pixel locations and the corresponding world point indices into one matrix
-    dataMatrix = [pixelLocations, mapPointsIdx];  % Concatenate horizontally
-
-    % Write the combined data to a CSV file
-    writematrix(dataMatrix, csvFilename);
-end
-
-
-function sortedImds = createSortedImds(folderPath)
-    % List all frame files in the folder
-    filePattern = fullfile(folderPath, 'frame_*.png'); % Modify pattern if necessary
-    files = dir(filePattern);
-    
-    % Extract the frame numbers and sort numerically
-    frameNumbers = cellfun(@(f) sscanf(f, 'frame_%d.png'), {files.name});
-    [~, sortOrder] = sort(frameNumbers);
-    sortedFiles = files(sortOrder);
-    
-    % Create full file paths
-    sortedFilePaths = fullfile(folderPath, {sortedFiles.name});
-    
-    % Create imageDatastore with sorted file paths
-    sortedImds = imageDatastore(sortedFilePaths);
-end
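-
-% Sorting matters because a plain imageDatastore lists files
-% lexicographically, which would place frame_10.png before frame_2.png;
-% parsing the frame number with sscanf and sorting on it restores the
-% intended frame_1, frame_2, ..., frame_10 order.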
-
diff --git a/target/classes/vslam/vslam_implementation_rgbd.m b/target/classes/vslam/vslam_implementation_rgbd.m
deleted file mode 100755
index 74edc4ab21bcc0ad33fb673e4d5ec34c672cf243..0000000000000000000000000000000000000000
--- a/target/classes/vslam/vslam_implementation_rgbd.m
+++ /dev/null
@@ -1,701 +0,0 @@
-function vslam_implementation_rgbd(dataset_name)
-    
-    % Import dependencies
-    addpath('./Mathworks_VSLAM_RGBD/');
-
-    % Define the base folder for datasets
-    datasetFolder = [dataset_name, '/'];
-
-    % Check if the dataset exists, if not download and prepare it
-    if ~exist(datasetFolder, 'dir')
-        if strcmp(dataset_name, 'tum_rgbd_dataset')
-            % Define the URL and local folder for download
-            baseDownloadURL = "https://cvg.cit.tum.de/rgbd/dataset/freiburg3/rgbd_dataset_freiburg3_long_office_household.tgz";
-            dataFolder = fullfile('tum_rgbd_dataset', filesep);
-            tgzFileName = [dataFolder, 'fr3_office.tgz'];
-
-            % Create a folder to save the downloaded file
-            if ~exist(dataFolder, 'dir')
-                mkdir(dataFolder);
-                disp('Downloading fr3_office.tgz (1.38 GB). This download can take a few minutes.');
-                h = waitbar(0, 'Downloading Dataset... Please wait.', 'Name', 'Downloading fr3_office');
-                websave(tgzFileName, baseDownloadURL, weboptions('Timeout', Inf));
-                % Update and close waitbar after download completes
-                waitbar(1, h, 'Download Complete. Extracting...');
-                pause(1); % Pause to show complete message
-                close(h);
-
-                % Extract contents of the downloaded file
-                disp('Extracting fr3_office.tgz (1.38 GB) ...');
-                h = waitbar(0, 'Extracting files... This may take a while.', 'Name', 'Extracting Data');
-                untar(tgzFileName, dataFolder);
-
-                % Close waitbar after extraction completes
-                waitbar(1, h, 'Extraction Complete.');
-                pause(1); % Pause to show complete message
-                close(h);
-            end
-
-            imageFolder = [dataFolder, 'rgbd_dataset_freiburg3_long_office_household/'];
-
-            % Initialize image dataset stores for color and depth images
-            imgFolderColor = [imageFolder, 'rgb/'];
-            imgFolderDepth = [imageFolder, 'depth/'];
-            imdsColor = imageDatastore(imgFolderColor);
-            imdsDepth = imageDatastore(imgFolderDepth);
-        
-            % Note that the color and depth images are generated in an
-            % unsynchronized way in this dataset, so color images must be
-            % associated with depth images based on their time stamps.
-            % Load time stamp data of color images
-            timeColor = helperImportTimestampFile([imageFolder, 'rgb.txt']);
-            
-            % Load time stamp data of depth images
-            timeDepth = helperImportTimestampFile([imageFolder, 'depth.txt']);
-            
-            % Align the time stamp
-            indexPairs = helperAlignTimestamp(timeColor, timeDepth);
-            
-            % Select the synchronized image data
-            imdsColor     = subset(imdsColor, indexPairs(:, 1));
-            imdsDepth     = subset(imdsDepth, indexPairs(:, 2));
-
-        elseif strcmp(dataset_name, 'imperial_college_london')
-            % Define the URL and local folder for download
-            baseDownloadURL = "http://www.doc.ic.ac.uk/~ahanda/living_room_traj2_frei_png.tar.gz";
-            dataFolder = fullfile('imperial_college_london', filesep);
-            tgzFileName = [dataFolder, 'living_room_traj2_frei_png.tar.gz'];
-
-            % Create a folder to save the downloaded file
-            if ~exist(dataFolder, 'dir')
-                mkdir(dataFolder);
-                disp('Downloading living_room_traj2_frei_png.tar.gz (486 MB). This download can take a few minutes.');
-                h = waitbar(0, 'Downloading Dataset... Please wait.', 'Name', 'Downloading living_room_traj2');
-                websave(tgzFileName, baseDownloadURL, weboptions('Timeout', Inf));
-                % Update and close waitbar after download completes
-                waitbar(1, h, 'Download Complete. Extracting...');
-                pause(1); % Pause to show complete message
-                close(h);
-
-                % Extract contents of the downloaded file
-                disp('Extracting living_room_traj2 (486 MB) ...');
-                h = waitbar(0, 'Extracting files... This may take a while.', 'Name', 'Extracting Data');
-                untar(tgzFileName, dataFolder);
-
-                % Close waitbar after extraction completes
-                waitbar(1, h, 'Extraction Complete.');
-                pause(1); % Pause to show complete message
-                close(h);
-            end
-
-            imageFolder = [dataFolder];
-
-            % Initialize image dataset stores for color and depth images
-            imgFolderColor = [imageFolder, 'rgb/'];
-            imgFolderDepth = [imageFolder, 'depth/'];
-            imdsColor = imageDatastore(imgFolderColor);
-            imdsDepth = imageDatastore(imgFolderDepth);
-        else
-            error('Dataset name not recognized or dataset-specific download steps not defined.');
-        end
-    else
-        % If dataset is already present, set the image folder path
-        if strcmp(dataset_name, 'tum_rgbd_dataset')
-            imageFolder = [datasetFolder, 'rgbd_dataset_freiburg3_long_office_household/'];
-            % Initialize image dataset stores for color and depth images
-            imgFolderColor = [imageFolder, 'rgb/'];
-            imgFolderDepth = [imageFolder, 'depth/'];
-            imdsColor = imageDatastore(imgFolderColor);
-            imdsDepth = imageDatastore(imgFolderDepth);
-        
-            % Note that the color and depth images are generated in an
-            % unsynchronized way in this dataset, so color images must be
-            % associated with depth images based on their time stamps.
-            % Load time stamp data of color images
-            timeColor = helperImportTimestampFile([imageFolder, 'rgb.txt']);
-            
-            % Load time stamp data of depth images
-            timeDepth = helperImportTimestampFile([imageFolder, 'depth.txt']);
-            
-            % Align the time stamp
-            indexPairs = helperAlignTimestamp(timeColor, timeDepth);
-            
-            % Select the synchronized image data
-            imdsColor     = subset(imdsColor, indexPairs(:, 1));
-            imdsDepth     = subset(imdsDepth, indexPairs(:, 2));
-        elseif strcmp(dataset_name, 'imperial_college_london')
-            imageFolder = [datasetFolder];
-
-            % Initialize image dataset stores for color and depth images
-            imgFolderColor = [imageFolder, 'rgb/'];
-            imgFolderDepth = [imageFolder, 'depth/'];
-            imdsColor = imageDatastore(imgFolderColor);
-            imdsDepth = imageDatastore(imgFolderDepth);
-        end 
-    end
-
-    % Process the image sequence
-    [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth);
-
-    % Save the outputs to CSV files in directories named after each output
-    savePosesToCSV(optimizedPoses, 'CameraPoses');
-    savePointCloudToCSV(pointCloudsAll);
-    saveIntrinsicsToCSV(intrinsics);
-
-    % Iterate over KeyFrames and execute extractPointsByViewId
-    keyFramesDir = './KeyFrames';
-    keyFrameFiles = dir(fullfile(keyFramesDir, 'KeyFrame_*.png'));
-
-    for i = 1:length(keyFrameFiles)
-        filename = keyFrameFiles(i).name;
-        viewId = str2double(regexp(filename, '\d+', 'match', 'once')); % Extracting the numeric part from filename
-        extractPointsByViewId(viewId, worldPointSetOutput);
-    end
-
-end
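-
-% Example usage (a sketch; both dataset names are handled by the
-% download-and-prepare logic above):
-%   vslam_implementation_rgbd('tum_rgbd_dataset');
-%   vslam_implementation_rgbd('imperial_college_london');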
-
-
-function [worldPointSetOutput, optimizedPoses, pointCloudsAll, intrinsics] = ProcessImageSequence(imdsColor, imdsDepth)
-    % Inspect the first RGB-D image
-    currFrameIdx  = 1;
-    currIcolor    = readimage(imdsColor, currFrameIdx);
-    currIdepth    = readimage(imdsDepth, currFrameIdx);
-    % imshowpair(currIcolor, currIdepth, "montage");
-    
-    % Map Initialization: The pipeline starts by initializing the map that holds 3-D world points. 
-    % This step is crucial and has a significant impact on the accuracy of the final SLAM result. 
-    % Initial ORB feature points are extracted from the first color image using helperDetectAndExtractFeatures. 
-    % Their corresponding 3-D world locations can be computed from the pixel coordinates of the feature 
-    % points and the depth value using helperReconstructFromRGBD.
-
-    % Set random seed for reproducibility
-    rng(0);
-    
-    % Create a cameraIntrinsics object to store the camera intrinsic parameters.
-    % The intrinsics for the dataset can be found at the following page:
-    % https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
-    focalLength    = [535.4, 539.2];    % in units of pixels
-    principalPoint = [320.1, 247.6];    % in units of pixels
-    imageSize      = size(currIcolor,[1,2]); % in pixels [mrows, ncols]
-    depthFactor    = 5e3;
-    intrinsics     = cameraIntrinsics(focalLength,principalPoint,imageSize);
-    
-    % Detect and extract ORB features from the color image
-    scaleFactor = 1.2;
-    numLevels   = 8;
-    [currFeatures, currPoints] = helperDetectAndExtractFeatures(currIcolor, scaleFactor, numLevels); 
-    
-    initialPose = rigidtform3d();
-    [xyzPoints, validIndex] = helperReconstructFromRGBD(currPoints, currIdepth, intrinsics, initialPose, depthFactor);
-
-    % Initialize Place Recognition Database
-    % Loop detection is performed using the bag-of-words approach. A visual
-    % vocabulary, represented as a bagOfFeatures object, is created offline
-    % from the ORB descriptors extracted from a large set of images in the
-    % dataset by calling:
-    %   bag = bagOfFeatures(imds, CustomExtractor=@helperORBFeatureExtractorFunction, TreeProperties=[5, 10], StrongestFeatures=1);
-    % where imds is an imageDatastore object storing the training images and
-    % helperORBFeatureExtractorFunction is the ORB feature extractor. See
-    % Image Retrieval with Bag of Visual Words for more information.
-    % The loop closure process incrementally builds a database, represented
-    % as an invertedImageIndex object, that stores the visual word-to-image
-    % mapping based on the bag of ORB features.
-
-    % Load the bag of features data created offline
-    bofData         = load("bagOfFeaturesDataSLAM.mat");
-    
-    % Initialize the place recognition database
-    loopDatabase    = invertedImageIndex(bofData.bof, SaveFeatureLocations=false);
-    
-    % Add features of the first key frame to the database
-    currKeyFrameId = 1;
-    addImageFeatures(loopDatabase, currFeatures, currKeyFrameId);
-
-    % Data Management and Visualization
-    % After the map is initialized using the first pair of color and depth image, you can use imageviewset and worldpointset to store the first key frames and the corresponding map points:
-
-    % Create an empty imageviewset object to store key frames
-    vSetKeyFrames = imageviewset;
-    
-    % Create an empty worldpointset object to store 3-D map points
-    mapPointSet   = worldpointset;
-    
-    % Add the first key frame
-    vSetKeyFrames = addView(vSetKeyFrames, currKeyFrameId, initialPose, Points=currPoints,...
-        Features=currFeatures.Features);
-    
-    % Add 3-D map points
-    [mapPointSet, rgbdMapPointsIdx] = addWorldPoints(mapPointSet, xyzPoints);
-    
-    % Add observations of the map points
-    mapPointSet = addCorrespondences(mapPointSet, currKeyFrameId, rgbdMapPointsIdx, validIndex);
-    
-    % Update view direction and depth
-    mapPointSet = updateLimitsAndDirection(mapPointSet, rgbdMapPointsIdx, vSetKeyFrames.Views);
-    
-    % Update representative view
-    mapPointSet = updateRepresentativeView(mapPointSet, rgbdMapPointsIdx, vSetKeyFrames.Views);
-    
-    % Visualize matched features in the first key frame
-    featurePlot = helperVisualizeMatchedFeaturesRGBD(currIcolor, currIdepth, currPoints(validIndex));
-    
-    % Visualize initial map points and camera trajectory
-    xLim = [-4 4];
-    yLim = [-3 1];
-    zLim = [-1 6];
-    mapPlot  = helperVisualizeMotionAndStructure(vSetKeyFrames, mapPointSet, xLim, yLim, zLim);
-    
-    % Show legend
-    showLegend(mapPlot);
-
-    % Tracking
-    % The tracking process is performed using every RGB-D image and determines when to insert a new key frame. 
-    % ViewId of the last key frame
-    lastKeyFrameId    = currKeyFrameId;
-    
-    % Index of the last key frame in the input image sequence
-    lastKeyFrameIdx   = currFrameIdx; 
-    
-    % Indices of all the key frames in the input image sequence
-    addedFramesIdx    = lastKeyFrameIdx;
-    
-    currFrameIdx      = 2;
-    isLoopClosed      = false;
-
-    % Each frame is processed as follows:
-    %  1. Extract ORB features for each new color image, then match them
-    %     (using matchFeatures) with features in the last key frame that have
-    %     known corresponding 3-D map points.
-    %  2. Estimate the camera pose with the Perspective-n-Point algorithm
-    %     (estworldpose), which computes the pose of a calibrated camera from
-    %     a set of 3-D points and their 2-D projections.
-    %  3. Given the camera pose, project the map points observed by the last
-    %     key frame into the current frame and search for feature
-    %     correspondences using matchFeaturesInRadius.
-    %  4. With 3-D to 2-D correspondences in the current frame, refine the
-    %     camera pose with a motion-only bundle adjustment
-    %     (bundleAdjustmentMotion).
-    %  5. Project the local map points into the current frame to search for
-    %     more feature correspondences (matchFeaturesInRadius) and refine the
-    %     camera pose again (bundleAdjustmentMotion).
-    %  6. Decide whether the current frame is a new key frame. A frame is a
-    %     key frame if both of the following hold:
-    %       a. At least 20 frames have passed since the last key frame, or
-    %          the current frame tracks fewer than 100 map points or 25% of
-    %          the points tracked by the reference key frame.
-    %       b. The map points tracked by the current frame are fewer than 90%
-    %          of the points tracked by the reference key frame.
-    % If the current frame becomes a key frame, continue to the Local Mapping
-    % process; otherwise, start Tracking for the next frame.
-    
-    % Main loop
-    isLastFrameKeyFrame = true;
-    while ~isLoopClosed && currFrameIdx < numel(imdsColor.Files)
-    
-        currIcolor = readimage(imdsColor, currFrameIdx);
-        currIdepth = readimage(imdsDepth, currFrameIdx);
-    
-        [currFeatures, currPoints]    = helperDetectAndExtractFeatures(currIcolor, scaleFactor, numLevels);
-    
-        % Track the last key frame
-        % trackedMapPointsIdx:  Indices of the map points observed in the current left frame 
-        % trackedFeatureIdx:    Indices of the corresponding feature points in the current left frame
-        [currPose, trackedMapPointsIdx, trackedFeatureIdx] = helperTrackLastKeyFrame(mapPointSet, ...
-            vSetKeyFrames.Views, currFeatures, currPoints, lastKeyFrameId, intrinsics, scaleFactor);
-        
-        if isempty(currPose) || numel(trackedMapPointsIdx) < 30
-            currFrameIdx = currFrameIdx + 1;
-            continue
-        end
-        
-        % Track the local map and check if the current frame is a key frame.
-        % A frame is a key frame if both of the following conditions are satisfied:
-        %
-        % 1. At least 20 frames have passed since the last key frame or the 
-        %    current frame tracks fewer than 100 map points. 
-        % 2. The map points tracked by the current frame are fewer than 90% of 
-        %    points tracked by the reference key frame.
-        %
-        % localKeyFrameIds:   ViewId of the connected key frames of the current frame
-        numSkipFrames     = 20;
-        numPointsKeyFrame = 100;
-        [localKeyFrameIds, currPose, trackedMapPointsIdx, trackedFeatureIdx, isKeyFrame] = ...
-            helperTrackLocalMap(mapPointSet, vSetKeyFrames, trackedMapPointsIdx, ...
-            trackedFeatureIdx, currPose, currFeatures, currPoints, intrinsics, scaleFactor, numLevels, ...
-            isLastFrameKeyFrame, lastKeyFrameIdx, currFrameIdx, numSkipFrames, numPointsKeyFrame);
-    
-        % Visualize matched features
-        updatePlot(featurePlot, currIcolor, currIdepth, currPoints(trackedFeatureIdx));
-        
-        if ~isKeyFrame
-            currFrameIdx = currFrameIdx + 1;
-            isLastFrameKeyFrame = false;
-            continue
-        else
-            % Match feature points between the stereo images and get the 3-D world positions
-            [xyzPoints, validIndex] = helperReconstructFromRGBD(currPoints, currIdepth, ...
-                intrinsics, currPose, depthFactor);
-    
-            [untrackedFeatureIdx, ia] = setdiff(validIndex, trackedFeatureIdx);
-            xyzPoints = xyzPoints(ia, :);
-            isLastFrameKeyFrame = true;
-        end
-    
-        % Update current key frame ID
-        currKeyFrameId  = currKeyFrameId + 1;
-
-        % Local Mapping
-        % Local mapping is performed for every key frame. When a new key frame
-        % is determined, add it to the key frames and update the attributes of
-        % the map points observed by the new key frame. To keep mapPointSet as
-        % free of outliers as possible, a valid map point must be observed in
-        % at least 3 key frames.
-        % New map points are created by triangulating ORB feature points in
-        % the current key frame and its connected key frames. For each
-        % unmatched feature point in the current key frame, search for a match
-        % with other unmatched points in the connected key frames using
-        % matchFeatures. The local bundle adjustment refines the pose of the
-        % current key frame, the poses of connected key frames, and all the
-        % map points observed in these key frames.
-
-        % Add the new key frame    
-        [mapPointSet, vSetKeyFrames] = helperAddNewKeyFrame(mapPointSet, vSetKeyFrames, ...
-            currPose, currFeatures, currPoints, trackedMapPointsIdx, trackedFeatureIdx, localKeyFrameIds);
-            
-        % Remove outlier map points that are observed in fewer than 3 key frames
-        if currKeyFrameId == 2
-            triangulatedMapPointsIdx = [];
-        end
-        
-        [mapPointSet, trackedMapPointsIdx] = ...
-            helperCullRecentMapPoints(mapPointSet, trackedMapPointsIdx, triangulatedMapPointsIdx, ...
-            rgbdMapPointsIdx);
-        
-        % Add new map points computed from disparity 
-        [mapPointSet, rgbdMapPointsIdx] = addWorldPoints(mapPointSet, xyzPoints);
-        mapPointSet = addCorrespondences(mapPointSet, currKeyFrameId, rgbdMapPointsIdx, ...
-            untrackedFeatureIdx);
-        
-        % Create new map points by triangulation
-        minNumMatches = 10;
-        minParallax   = 0.35;
-        [mapPointSet, vSetKeyFrames, triangulatedMapPointsIdx, rgbdMapPointsIdx] = helperCreateNewMapPointsStereo( ...
-            mapPointSet, vSetKeyFrames, currKeyFrameId, intrinsics, scaleFactor, minNumMatches, minParallax, ...
-            untrackedFeatureIdx, rgbdMapPointsIdx);
-    
-        % Update view direction and depth
-        mapPointSet = updateLimitsAndDirection(mapPointSet, [triangulatedMapPointsIdx; rgbdMapPointsIdx], ...
-            vSetKeyFrames.Views);
-    
-        % Update representative view
-        mapPointSet = updateRepresentativeView(mapPointSet, [triangulatedMapPointsIdx; rgbdMapPointsIdx], ...
-            vSetKeyFrames.Views);
-    
-        % Local bundle adjustment
-        [mapPointSet, vSetKeyFrames, triangulatedMapPointsIdx, rgbdMapPointsIdx] = ...
-            helperLocalBundleAdjustmentStereo(mapPointSet, vSetKeyFrames, ...
-            currKeyFrameId, intrinsics, triangulatedMapPointsIdx, rgbdMapPointsIdx);
-
-        % Check if the KeyFrames directory exists; if not, create it
-        keyFramesDir = './KeyFrames';
-        if ~exist(keyFramesDir, 'dir')
-            mkdir(keyFramesDir);
-        end
-
-        % Store feature locations for this key frame
-        keyFramePointsDir = './KeyFramePoints';
-        if ~exist(keyFramePointsDir, 'dir')
-            mkdir(keyFramePointsDir); 
-        end
-
-        % Save the current key frame image using currKeyFrameId
-        filename = sprintf('%s/KeyFrame_%04d.png', keyFramesDir, currKeyFrameId);
-        imwrite(currIcolor, filename);
-
-        % Save feature points information
-        saveKeyFramePoints(keyFramePointsDir, currKeyFrameId, currPoints(trackedFeatureIdx), trackedMapPointsIdx);
-    
-        % Visualize 3-D world points and camera trajectory
-        updatePlot(mapPlot, vSetKeyFrames, mapPointSet);
-
-        % Loop Closure
-        % The loop closure step takes the current key frame processed by the
-        % local mapping process and tries to detect and close the loop. Loop
-        % candidates are identified by querying images in the database that
-        % are visually similar to the current key frame using
-        % evaluateImageRetrieval. A candidate key frame is valid if it is not
-        % connected to the last key frame and three of its neighbor key frames
-        % are loop candidates.
-        % When a valid loop candidate is found, use estgeotform3d to compute
-        % the relative pose between the loop candidate frame and the current
-        % key frame. The relative pose represents a 3-D rigid transformation
-        % stored in a rigidtform3d object. Then add the loop connection with
-        % the relative pose and update mapPointSet and vSetKeyFrames.
-        % Check loop closure after some key frames have been created.
-        if currKeyFrameId > 20
-            
-            % Minimum number of feature matches of loop edges
-            loopEdgeNumMatches = 120;
-            
-            % Detect possible loop closure key frame candidates
-            [isDetected, validLoopCandidates] = helperCheckLoopClosure(vSetKeyFrames, currKeyFrameId, ...
-                loopDatabase, currIcolor, loopEdgeNumMatches);
-            
-            if isDetected 
-                % Add loop closure connections
-                maxDistance = 0.1;
-                [isLoopClosed, mapPointSet, vSetKeyFrames] = helperAddLoopConnectionsStereo(...
-                    mapPointSet, vSetKeyFrames, validLoopCandidates, currKeyFrameId, ...
-                    currFeatures, currPoints, loopEdgeNumMatches, maxDistance);
-            end
-        end
-        
-        % If no loop closure is detected, add current features into the database
-        if ~isLoopClosed
-            addImageFeatures(loopDatabase,  currFeatures, currKeyFrameId);
-        end
-        
-        % Update IDs and indices
-        lastKeyFrameId  = currKeyFrameId;
-        lastKeyFrameIdx = currFrameIdx;
-        addedFramesIdx  = [addedFramesIdx; currFrameIdx]; %#ok<AGROW>
-        currFrameIdx    = currFrameIdx + 1;
-    end % End of main loop
-
-    % Finally, apply pose graph optimization over the essential graph in
-    % vSetKeyFrames to correct the drift. The essential graph is created
-    % internally by removing connections with fewer than minNumMatches matches
-    % in the covisibility graph. After pose graph optimization, update the 3-D
-    % locations of the map points using the optimized poses.
-    
-    % Optimize the poses
-    minNumMatches      = 50;
-    vSetKeyFramesOptim = optimizePoses(vSetKeyFrames, minNumMatches, Tolerance=1e-16);
-    
-    % Update map points after optimizing the poses
-    mapPointSet = helperUpdateGlobalMap(mapPointSet, vSetKeyFrames, vSetKeyFramesOptim);
-    
-    updatePlot(mapPlot, vSetKeyFrames, mapPointSet);
-    
-    % Plot the optimized camera trajectory
-    optimizedPoses  = poses(vSetKeyFramesOptim);
-    plotOptimizedTrajectory(mapPlot, optimizedPoses)
-    
-    % Update legend
-    showLegend(mapPlot);
-
-    worldPointSetOutput = mapPointSet;
-
-    % Dense Reconstruction from Depth Image
-    % Given the refined camera poses, you can reproject all the valid image
-    % points in the associated depth images back to 3-D space to perform
-    % dense reconstruction.
-
-    % Create an array of pointCloud objects to store the world points
-    % constructed from the key frames
-    ptClouds = repmat(pointCloud(zeros(1, 3)), numel(addedFramesIdx), 1);
-    
-    % Ignore image points at the boundary 
-    offset = 40;
-    [X, Y] = meshgrid(offset:2:imageSize(2)-offset, offset:2:imageSize(1)-offset);
-    
-    for i = 1: numel(addedFramesIdx)
-        Icolor = readimage(imdsColor, addedFramesIdx(i));
-        Idepth = readimage(imdsDepth, addedFramesIdx(i));
-    
-        [xyzPoints, validIndex] = helperReconstructFromRGBD([X(:), Y(:)], ...
-            Idepth, intrinsics, optimizedPoses.AbsolutePose(i), depthFactor);
-    
-        colors = zeros(numel(X), 1, 'like', Icolor);
-        for j = 1:numel(X)
-            colors(j, 1:3) = Icolor(Y(j), X(j), :);
-        end
-        ptClouds(i) = pointCloud(xyzPoints, Color=colors(validIndex, :));
-    end
-    
-    % Concatenate the point clouds
-    pointCloudsAll = pccat(ptClouds);
-    
-    figure
-    pcshow(pointCloudsAll,VerticalAxis="y", VerticalAxisDir="down");
-    xlabel('X')
-    ylabel('Y')
-    zlabel('Z')
-end
-
-%% Helper functions
-
-% helperImportTimestampFile Import time stamp file
-
-function timestamp = helperImportTimestampFile(filename)
-    % Input handling
-    dataLines = [4, Inf];
-    
-    %% Set up the Import Options and import the data
-    opts = delimitedTextImportOptions("NumVariables", 2);
-    
-    % Specify range and delimiter
-    opts.DataLines = dataLines;
-    opts.Delimiter = " ";
-    
-    % Specify column names and types
-    opts.VariableNames = ["VarName1", "Var2"];
-    opts.SelectedVariableNames = "VarName1";
-    opts.VariableTypes = ["double", "string"];
-    
-    % Specify file level properties
-    opts.ExtraColumnsRule = "ignore";
-    opts.EmptyLineRule = "read";
-    opts.ConsecutiveDelimitersRule = "join";
-    opts.LeadingDelimitersRule = "ignore";
-    
-    % Specify variable properties
-    opts = setvaropts(opts, "Var2", "WhitespaceRule", "preserve");
-    opts = setvaropts(opts, "Var2", "EmptyFieldRule", "auto");
-    
-    % Import the data
-    data = readtable(filename, opts);
-    
-    % Convert to output type
-    timestamp = table2array(data);
-end
-
-% helperAlignTimestamp aligns the time stamps of color and depth images.
-function indexPairs = helperAlignTimestamp(timeColor, timeDepth)
-    idxDepth = 1;
-    indexPairs = zeros(numel(timeColor), 2);
-    for i = 1:numel(timeColor)
-        for j = idxDepth : numel(timeDepth)
-            if abs(timeColor(i) - timeDepth(j)) < 1e-4
-                idxDepth = j;
-                indexPairs(i, :) = [i, j];
-                break
-            elseif timeDepth(j) - timeColor(i) > 1e-3
-                break
-            end
-        end
-    end
-    indexPairs = indexPairs(indexPairs(:,1)>0, :);
-end
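-
-% For example, with timeColor = [1.00000; 1.05000] and
-% timeDepth = [1.00005; 1.08000], the first color frame pairs with the
-% first depth frame (time difference 5e-5 < 1e-4), while the second color
-% frame has no depth frame within tolerance and is dropped, so
-% indexPairs = [1, 1].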
-
-% helperDetectAndExtractFeatures detects and extracts ORB features from the image.
-function [features, validPoints] = helperDetectAndExtractFeatures(Irgb, scaleFactor, numLevels)
- 
-    numPoints = 1000;
-    
-    % Detect ORB features
-    Igray  = rgb2gray(Irgb);
-    
-    points = detectORBFeatures(Igray, ScaleFactor=scaleFactor, NumLevels=numLevels);
-    
-    % Select a subset of features, uniformly distributed throughout the image
-    points = selectUniform(points, numPoints, size(Igray, 1:2));
-    
-    % Extract features
-    [features, validPoints] = extractFeatures(Igray, points);
-end
-
-% helperReconstructFromRGBD reconstructs the scene from the color and depth images.
-
-function [xyzPoints, validIndex] = helperReconstructFromRGBD(points, ...
-    depthMap, intrinsics, currPose, depthFactor)
-
-    ptcloud = pcfromdepth(depthMap,depthFactor,intrinsics,ImagePoints=points, DepthRange=[0.1, 5]);
-    
-    isPointValid = ~isnan(ptcloud.Location(:, 1));
-    xyzPoints    = ptcloud.Location(isPointValid, :);
-    xyzPoints    = transformPointsForward(currPose, xyzPoints);
-    validIndex   = find(isPointValid);
-end
-
-% helperCullRecentMapPoints culls recently added map points.
-function [mapPointSet, mapPointsIdx] = ...
-    helperCullRecentMapPoints(mapPointSet, mapPointsIdx, newPointIdx, rgbdMapPointsIndices)
-    outlierIdx = setdiff([newPointIdx; rgbdMapPointsIndices], mapPointsIdx);
-    if ~isempty(outlierIdx)
-        mapPointSet   = removeWorldPoints(mapPointSet, outlierIdx);
-        mapPointsIdx  = mapPointsIdx - arrayfun(@(x) nnz(x>outlierIdx), mapPointsIdx);
-    end
-end
-
-% helperEstimateTrajectoryError calculates the tracking error.
-function rmse = helperEstimateTrajectoryError(gTruth, cameraPoses)
-    locations       = vertcat(cameraPoses.AbsolutePose.Translation);
-    gLocations      = vertcat(gTruth.Translation);
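-    % Estimate the scale between the two trajectories from the ratio of
-    % median distances to the origin, then compare at a common scale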
-    scale           = median(vecnorm(gLocations, 2, 2)) / median(vecnorm(locations, 2, 2));
-    scaledLocations = locations * scale;
-    
-    rmse = sqrt(mean( sum((scaledLocations - gLocations).^2, 2) ));
-    disp(['Absolute RMSE for key frame trajectory (m): ', num2str(rmse)]);
-end
-
-% helperUpdateGlobalMap update 3-D locations of map points after pose graph optimization
-function mapPointSet = helperUpdateGlobalMap(mapPointSet, vSetKeyFrames, vSetKeyFramesOptim)
-
-    posesOld     = vSetKeyFrames.Views.AbsolutePose;
-    posesNew     = vSetKeyFramesOptim.Views.AbsolutePose;
-    positionsOld = mapPointSet.WorldPoints;
-    positionsNew = positionsOld;
-    indices = 1:mapPointSet.Count;
-    
-    % Update world location of each map point based on the new absolute pose of 
-    % the corresponding major view
-    for i = 1: mapPointSet.Count
-        majorViewIds = mapPointSet.RepresentativeViewId(i);
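-        % Correction for this point's representative view: the optimized
-        % pose composed with the inverse of the old pose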
-        tform = rigidtform3d(posesNew(majorViewIds).A/posesOld(majorViewIds).A);
-        positionsNew(i, :) = transformPointsForward(tform, positionsOld(i, :));
-    end
-    mapPointSet = updateWorldPoints(mapPointSet, indices, positionsNew);
-end
-
-% CSV file creation for points within keyframe
-function saveKeyFramePoints(dirPath, keyFrameId, featurePoints, mapPointsIdx)
-    % Ensure the directory exists
-    if ~exist(dirPath, 'dir')
-        mkdir(dirPath);
-    end
-
-    % Define the filename for the CSV file
-    csvFilename = sprintf('%s/KeyFramePoints_%04d.csv', dirPath, keyFrameId);
-
-    % Extract pixel locations from the feature points
-    pixelLocations = featurePoints.Location;  % This should be an Nx2 matrix
-
-    % Combine the pixel locations with the corresponding map point indices
-    dataMatrix = [pixelLocations, mapPointsIdx];  % N-by-3: [x, y, mapPointIdx]
-
-    % Write the combined data to a CSV file
-    writematrix(dataMatrix, csvFilename);
-end
-
-
-function savePointCloudToCSV(pointCloudsAll)
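-    % Each row holds [X Y Z R G B]; the color is cast to single so one
-    % numeric matrix can be written with writematrix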
-    mat = [pointCloudsAll.Location, cast(pointCloudsAll.Color, "single")];
-    writematrix(mat, "pointcloud.csv");
-end
-
-
-function savePosesToCSV(optimizedPoses, dirPath)
-    % Create the output directory if it does not already exist
-    if ~exist(dirPath, 'dir')
-        mkdir(dirPath)
-    end
-    
-    numPoses = size(optimizedPoses, 1);
-    for i = 1:numPoses
-        p = optimizedPoses.AbsolutePose(i, 1);
-        % first row is the translation
-        % rows two through four are the 3x3 rotation matrix
-        mat = [p.Translation; p.R];
-        idx = optimizedPoses.ViewId(i,1);
-
-        % save to directory
-        fname = sprintf('%s/Pose_%04d.csv', dirPath, idx);
-        writematrix(mat, fname);
-    end
-end
-
-
-function saveIntrinsicsToCSV(intrinsics)
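-    % 6x3 layout: row 1 focal length, row 2 principal point, row 3 image
-    % size (each padded with a trailing zero), rows 4-6 the 3x3 matrix K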
-    mat = [intrinsics.FocalLength, 0 ; 
-        intrinsics.PrincipalPoint, 0 ;
-        intrinsics.ImageSize, 0 ; 
-        intrinsics.K];
-
-    writematrix(mat, 'CameraIntrinsics.csv')
-end
diff --git a/target/classes/yolo/YOLODetector.class b/target/classes/yolo/YOLODetector.class
deleted file mode 100644
index e3def0016b3937864a85d32d45d26b6df7d394f4..0000000000000000000000000000000000000000
Binary files a/target/classes/yolo/YOLODetector.class and /dev/null differ
diff --git a/target/classes/yolo/YOLONet$ObjectDetectionResult.class b/target/classes/yolo/YOLONet$ObjectDetectionResult.class
deleted file mode 100644
index e6976f7c3b252350a1fea3adce7b6c505db0ea26..0000000000000000000000000000000000000000
Binary files a/target/classes/yolo/YOLONet$ObjectDetectionResult.class and /dev/null differ
diff --git a/target/classes/yolo/YOLONet.class b/target/classes/yolo/YOLONet.class
deleted file mode 100644
index 9883e92355d00828d12d42e909f8dff21bc01dcb..0000000000000000000000000000000000000000
Binary files a/target/classes/yolo/YOLONet.class and /dev/null differ
diff --git a/target/test-classes/object_detection/ObjectSetTests.class b/target/test-classes/object_detection/ObjectSetTests.class
deleted file mode 100644
index 77b1c4cd8af9303a7f5c1672d25278ae528fda24..0000000000000000000000000000000000000000
Binary files a/target/test-classes/object_detection/ObjectSetTests.class and /dev/null differ
diff --git a/target/test-classes/yolo/YOLOTest.class b/target/test-classes/yolo/YOLOTest.class
deleted file mode 100644
index 352ef517317a693c31e5d1812e9a74429c65854c..0000000000000000000000000000000000000000
Binary files a/target/test-classes/yolo/YOLOTest.class and /dev/null differ