diff --git a/OpenCV/Photos/cat.jpg b/OpenCV/Photos/cat.jpg
new file mode 100644
index 0000000..8bc8ac4
Binary files /dev/null and b/OpenCV/Photos/cat.jpg differ
diff --git a/OpenCV/Photos/cat_large.jpg b/OpenCV/Photos/cat_large.jpg
new file mode 100644
index 0000000..37706ec
Binary files /dev/null and b/OpenCV/Photos/cat_large.jpg differ
diff --git a/OpenCV/Photos/cats.jpg b/OpenCV/Photos/cats.jpg
new file mode 100644
index 0000000..3b8d75d
Binary files /dev/null and b/OpenCV/Photos/cats.jpg differ
diff --git a/OpenCV/Photos/meme.jpg b/OpenCV/Photos/meme.jpg
new file mode 100644
index 0000000..a8eae09
Binary files /dev/null and b/OpenCV/Photos/meme.jpg differ
diff --git a/OpenCV/Photos/park.jpg b/OpenCV/Photos/park.jpg
new file mode 100644
index 0000000..2bef86b
Binary files /dev/null and b/OpenCV/Photos/park.jpg differ
diff --git a/OpenCV/Photos/tj.jpg b/OpenCV/Photos/tj.jpg
new file mode 100644
index 0000000..8b5d0dd
Binary files /dev/null and b/OpenCV/Photos/tj.jpg differ
diff --git a/OpenCV/Videos/dog.mp4 b/OpenCV/Videos/dog.mp4
new file mode 100644
index 0000000..5a6eee3
Binary files /dev/null and b/OpenCV/Videos/dog.mp4 differ
diff --git a/OpenCV/Videos/kitten.mp4 b/OpenCV/Videos/kitten.mp4
new file mode 100644
index 0000000..94fb406
Binary files /dev/null and b/OpenCV/Videos/kitten.mp4 differ
diff --git a/OpenCV/basic.py b/OpenCV/basic.py
new file mode 100644
index 0000000..87084b3
--- /dev/null
+++ b/OpenCV/basic.py
@@ -0,0 +1,42 @@
+import cv2 as cv
+
img = cv.imread('Photos/park.jpg')

cv.imshow('park', img)

# Converting to grayscale
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

img = cv.imread('Photos/meme.jpg')

cv.imshow('Meme', img)

# BUG FIX: this previously reused the 'Meme' window name, replacing the
# colour image just shown above — give the grayscale version its own window.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)   # convert BGR image to grayscale image
cv.imshow('Meme Gray', gray)

# Blur
blur = cv.GaussianBlur(img, (7,7), cv.BORDER_DEFAULT)   # kernel size has to be an odd number (increase size to increase blur)
cv.imshow('Blur', blur)

# Edge cascade
canny = cv.Canny(blur, 125, 175) # can reduce the amount of edges by using blur instead of img
cv.imshow('Canny Edges', canny)

# BUG FIX: passing the bare tuple (7,7) to dilate/erode makes OpenCV treat it
# as a 1x2 kernel (values 7,7), not a 7x7 neighbourhood. Build an explicit
# 7x7 rectangular structuring element instead.
kernel = cv.getStructuringElement(cv.MORPH_RECT, (7, 7))

# Dilating the image (more iterations / bigger kernel = thicker edges)
dilated = cv.dilate(canny, kernel, iterations=3)
cv.imshow('Dilated', dilated)

# Eroding (reverses dilation, trying to get back close to the original edge cascade)
eroded = cv.erode(dilated, kernel, iterations=3)
cv.imshow('Eroded', eroded)

# Resize — does NOT take aspect ratio into account.
# cv.INTER_CUBIC is the slowest interpolation but gives the highest quality;
# cv.INTER_AREA is the usual choice when shrinking below the original size.
resized = cv.resize(img, (500, 500), interpolation=cv.INTER_CUBIC)
cv.imshow('Resized', resized)

# Cropping: images are arrays, so slicing [y1:y2, x1:x2] selects a pixel region.
cropped = img[50:200, 200:400]
cv.imshow('Cropped', cropped)

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/bitwise.py b/OpenCV/bitwise.py
new file mode 100644
index 0000000..c817595
--- /dev/null
+++ b/OpenCV/bitwise.py
@@ -0,0 +1,5 @@
+import cv2 as cv
+


# Placeholder script: bitwise operations (AND, OR, XOR, NOT) to be added here.
cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/contours.py b/OpenCV/contours.py
new file mode 100644
index 0000000..a5ca00d
--- /dev/null
+++ b/OpenCV/contours.py
@@ -0,0 +1,42 @@
+import cv2 as cv
+import numpy as np
+
# Contours are the curves joining the continuous points along a boundary.
# Contours and edges are different concepts, though closely related, and in
# practice they are often treated as the same thing.

img = cv.imread('Photos/cats.jpg')
cv.imshow('Cats', img)

# Black canvas of the same size as the photo, used to draw the found contours.
blank = np.zeros(img.shape, dtype=np.uint8)
cv.imshow('Blank', blank)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

# Blurring first reduces the number of spurious edges Canny will report.
blur = cv.GaussianBlur(gray, (5, 5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)

canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)

# Alternative to Canny — binarise the grayscale image with a fixed threshold:
#   ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
#   cv.imshow('Thresh', thresh)
# pixels with intensity below 125 become 0 (black), above become 255 (white).

# Retrieval modes: cv.RETR_LIST returns every contour, RETR_EXTERNAL only the
# outermost ones, RETR_TREE the full hierarchical set.
# Approximation: CHAIN_APPROX_NONE keeps every contour point, while
# CHAIN_APPROX_SIMPLE compresses them into the points that make the most sense.
# findContours returns a list of contour coordinate arrays plus the
# hierarchical representation of those contours.
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)

print(f'{len(contours)} contour(s) found!')

# Draw all contours (-1) in red, 1 px thick, onto the blank canvas.
cv.drawContours(blank, contours, -1, (0,0,255), 1)
cv.imshow('Contours Drawing', blank)

cv.waitKey(0)
diff --git a/OpenCV/draw.py b/OpenCV/draw.py
new file mode 100644
index 0000000..df39c98
--- /dev/null
+++ b/OpenCV/draw.py
@@ -0,0 +1,37 @@
+import cv2 as cv
+import numpy as np
+
+
# Start from a black 500x500 BGR canvas; uint8 is the dtype OpenCV uses for
# images. Shape is (height, width, number of colour channels).
canvas = np.zeros((500, 500, 3), dtype='uint8')
cv.imshow('Blank', canvas)

# img = cv.imread('Photos/cat.jpg')
# cv.imshow('Cat', img)

GREEN = (0, 255, 0)
RED = (0, 0, 255)
WHITE = (255, 255, 255)

#1. Paint the image a certain color — ":" selects every pixel.
canvas[:] = GREEN
cv.imshow('Green', canvas)

# ...or paint only a rectangular region of pixels.
canvas[200:300, 300:400] = RED
cv.imshow('Red Portion', canvas)

#2. Draw a rectangle (thickness=-1, or cv.FILLED, fills it in).
cv.rectangle(canvas, (0,0), (250,250), GREEN, thickness=-1)
cv.imshow('Rectangle', canvas)

# Same idea, but sized relative to the canvas — half of each dimension.
half = (canvas.shape[1]//2, canvas.shape[0]//2)
cv.rectangle(canvas, (0,0), half, GREEN, thickness=-1)
cv.imshow('Rectangle', canvas)

#3. Draw a filled circle of radius 40 at the centre.
cv.circle(canvas, (250, 250), 40, RED, thickness=-1)
cv.imshow('Circle', canvas)

#4. Draw a line from the corner to the centre.
cv.line(canvas, (0,0), half, WHITE, thickness=3)
cv.imshow('Line', canvas)

#5. Write text.
cv.putText(canvas, 'Hello, my name is Ryan!', (0, 225), cv.FONT_HERSHEY_TRIPLEX, 1.0, GREEN, thickness=2)
cv.imshow('Text', canvas)

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/read.py b/OpenCV/read.py
new file mode 100644
index 0000000..32ef047
--- /dev/null
+++ b/OpenCV/read.py
@@ -0,0 +1,23 @@
+import cv2 as cv
+
# Reading images

# cv.imread takes a path to an image and returns it as a matrix of pixels
# (it returns None if the file cannot be read).
img = cv.imread('Photos/cat.jpg')

# cat_large.jpg is far bigger than the monitor; imshow displays it unscaled.
img = cv.imread('Photos/cat_large.jpg')

cv.imshow('Cat', img)  # displays the image in a new window

# Reading videos

# Pass an integer instead of a path to use a camera (0 = webcam,
# 1 = first camera connected to the computer, ...).
# BUG FIX: the path was lowercase 'videos/'; the folder is 'Videos/', which
# fails to open on case-sensitive filesystems.
capture = cv.VideoCapture('Videos/dog.mp4')

while True:
    # capture.read() reads the video frame by frame, returning the frame and
    # a boolean saying whether the frame was successfully read.
    isTrue, frame = capture.read()

    # BUG FIX: when the video ends, read() returns (False, None) and
    # cv.imshow(frame) would crash — exit the loop instead.
    if not isTrue:
        break

    cv.imshow('Video', frame)

    if cv.waitKey(20) & 0xFF == ord('d'):   # press 'd' to stop playback early
        break

capture.release()
cv.destroyAllWindows()

cv.waitKey(0)    # keyboard binding: wait (here, indefinitely) for a key press
\ No newline at end of file
diff --git a/OpenCV/rescale.py b/OpenCV/rescale.py
new file mode 100644
index 0000000..66da655
--- /dev/null
+++ b/OpenCV/rescale.py
@@ -0,0 +1,42 @@
+import cv2 as cv
+
img = cv.imread('Photos/cat.jpg')
cv.imshow('Cat', img)

def rescaleFrame(frame, scale = 0.75):
    """Return *frame* resized by *scale*. Works for images, videos and live video."""
    width = int(frame.shape[1] * scale)    # frame.shape[1] is the width
    height = int(frame.shape[0] * scale)   # frame.shape[0] is the height
    dimensions = (width, height)
    # INTER_AREA is the recommended interpolation when shrinking an image.
    return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)

def changeRes(width, height):
    # Live video only: property 3 is the capture's frame width and
    # property 4 its frame height.
    # NOTE(review): relies on the module-level `capture` defined below
    # existing before this is called.
    capture.set(3, width)
    capture.set(4, height)


resized_image = rescaleFrame(img)
cv.imshow('Image', resized_image)

# Reading videos — pass an integer for a camera (0 = webcam,
# 1 = first camera connected to the computer, ...).
# BUG FIX: the folder is 'Videos/', not 'videos/'; the lowercase path fails
# on case-sensitive filesystems.
capture = cv.VideoCapture('Videos/dog.mp4')

while True:
    # read() returns (success flag, next frame).
    isTrue, frame = capture.read()

    # BUG FIX: at end of video read() returns (False, None); rescaling or
    # showing None would crash, so stop the loop.
    if not isTrue:
        break

    frame_resized = rescaleFrame(frame, 0.2)

    cv.imshow('Video', frame)
    cv.imshow('Video Resized', frame_resized)

    if cv.waitKey(20) & 0xFF == ord('d'):   # press 'd' to stop playback early
        break

capture.release()
cv.destroyAllWindows()

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/smoothing.py b/OpenCV/smoothing.py
new file mode 100644
index 0000000..d467e15
--- /dev/null
+++ b/OpenCV/smoothing.py
@@ -0,0 +1,28 @@
+import cv2 as cv
+
# Blurring smooths an image and reduces some of its noise. A kernel (or
# "window") of a given size — its number of rows and columns is the "kernel
# size" — slides over the image, and each method computes the value of the
# pixel at the window's true centre from the surrounding pixels it covers.

img = cv.imread('Photos/cats.jpg')
cv.imshow('Cats', img)

# Averaging: the centre pixel becomes the average of the surrounding pixel
# intensities. Increase the kernel size to increase the blur.
average = cv.blur(img, (3, 3))
cv.imshow('Average Blur', average)

# Gaussian: each surrounding pixel gets a weight, and the weighted average
# sets the centre pixel — less blur than averaging, but more natural-looking.
gauss = cv.GaussianBlur(img, (3, 3), 0)
cv.imshow('Gaussian Blur', gauss)

# Median: like averaging but uses the median of the surrounding pixels; more
# effective at reducing noise than the two methods above. The single integer
# 3 is understood by OpenCV as a 3x3 kernel. Not meant for large kernels
# (e.g. 7 and up).
median = cv.medianBlur(img, 3)
cv.imshow('Median Blur', median)

# Bilateral: applies blurring while retaining the edges in the image.
bilateral = cv.bilateralFilter(img, 10, 35, 25)
cv.imshow('Bilateral', bilateral)

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/spaces.py b/OpenCV/spaces.py
new file mode 100644
index 0000000..215d5ff
--- /dev/null
+++ b/OpenCV/spaces.py
@@ -0,0 +1,39 @@
+import cv2 as cv
+import matplotlib.pyplot as plt
+
img = cv.imread('Photos/park.jpg')
cv.imshow('Park', img)

# plt.imshow(img)
# plt.show()

# OpenCV reads images in BGR channel order, which is not the system used to
# represent colours outside of OpenCV (RGB) — hence the conversions below.

# BGR -> Grayscale
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)

# BGR -> HSV (hue/saturation/value — modelled on how humans think about colour)
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
cv.imshow('HSV', hsv)

# BGR -> Lab (a.k.a. L*a*b)
lab = cv.cvtColor(img, cv.COLOR_BGR2LAB)
cv.imshow('LAB', lab)

# BGR -> RGB
rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
cv.imshow('RGB', rgb)

# HSV -> BGR
hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
cv.imshow('HSV--> BGR', hsv_bgr)

# Lab -> BGR
lab_bgr = cv.cvtColor(lab, cv.COLOR_LAB2BGR)
cv.imshow('LAB--> BGR', lab_bgr)

# matplotlib defaults to RGB, so the converted image displays correctly here;
# keep in mind the colour inversion that otherwise happens between the
# OpenCV and matplotlib libraries.
plt.imshow(rgb)
plt.show()

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/splitmerge.py b/OpenCV/splitmerge.py
new file mode 100644
index 0000000..c4e33ec
--- /dev/null
+++ b/OpenCV/splitmerge.py
@@ -0,0 +1,30 @@
+import cv2 as cv
+import numpy as np 
+
img = cv.imread('Photos/park.jpg')
cv.imshow('Park', img)

# Single-channel black canvas used to zero out the channels we are not showing.
blank = np.zeros(img.shape[:2], dtype='uint8')

# A colour image consists of multiple channels: red, green and blue. OpenCV
# lets us split an image into its respective colour channels; each one alone
# is grayscale, with lighter regions marking a higher concentration of that
# colour.
b, g, r = cv.split(img)

# Merge each channel with black planes so it displays in its own colour
# (the other two components are set to black).
blue = cv.merge([b, blank, blank])
green = cv.merge([blank, g, blank])
red = cv.merge([blank, blank, r])

cv.imshow('Blue', blue)
cv.imshow('Green', green)
cv.imshow('Red', red)

print(img.shape)   # (height, width, 3)
print(b.shape)     # single channels have no third (channel) dimension
print(g.shape)
print(r.shape)

# Recombining the three channels reconstructs the original image.
merged = cv.merge([b, g, r])
cv.imshow('Merged', merged)

cv.waitKey(0)
\ No newline at end of file
diff --git a/OpenCV/transformations.py b/OpenCV/transformations.py
new file mode 100644
index 0000000..812f073
--- /dev/null
+++ b/OpenCV/transformations.py
@@ -0,0 +1,57 @@
+import cv2 as cv
+import numpy as np
+
img = cv.imread('Photos/tj.jpg')

cv.imshow('TJHSST', img)

# Translation: shift the image by (x, y) pixels.
#   -x -> left, +x -> right, -y -> up, +y -> down
def translate(img, x, y):
    # 2x3 affine translation matrix, built from a list of two row lists.
    shift_mat = np.float32([[1, 0, x], [0, 1, y]])
    dims = (img.shape[1], img.shape[0])  # (width, height)
    return cv.warpAffine(img, shift_mat, dims)

translated = translate(img, -100, 100)
cv.imshow('Translated', translated)

# Rotation: rotate by `angle` degrees (pass a negative angle for clockwise)
# around rotPoint; when no rotation point is given, assume the centre.
def rotate(img, angle, rotPoint = None):
    (height, width) = img.shape[:2]

    if rotPoint is None:
        rotPoint = (width//2, height//2)

    rot_mat = cv.getRotationMatrix2D(rotPoint, angle, 1.0)   # 1.0 = scale
    return cv.warpAffine(img, rot_mat, (width, height))

rotated = rotate(img, 45)
cv.imshow('Rotated', rotated)

# Rotating an already-rotated image keeps the black regions (the default fill
# where there is no image) introduced by the first rotation; rotating once by
# the total angle avoids those black triangles.
rotated_rotated = rotate(rotated, 45)
cv.imshow('Rotated Rotated', rotated_rotated)

# Resizing — INTER_CUBIC is slow but high quality; for shrinking, the
# default interpolation is the usual choice.
resized = cv.resize(img, (500,500), interpolation=cv.INTER_CUBIC)
cv.imshow('Resized', resized)

# Flipping — three possible flip codes:
#    0 flips vertically (over the x-axis)
#    1 flips horizontally (over the y-axis)
#   -1 flips both vertically and horizontally
flip = cv.flip(img, 0)
cv.imshow('Flip', flip)

# Cropping: images are arrays, so slice by pixel ranges [y1:y2, x1:x2].
cropped = img[200:400, 300:400]
cv.imshow('Cropped', cropped)

cv.waitKey(0)
\ No newline at end of file
diff --git a/webots/python/worlds/.example.wbproj b/webots/python/worlds/.example.wbproj
index ed614b8..04bc6cc 100644
--- a/webots/python/worlds/.example.wbproj
+++ b/webots/python/worlds/.example.wbproj
@@ -1,7 +1,8 @@
 Webots Project File version R2021b
-perspectives: 000000ff00000000fd000000040000000000000069000003d6fc0100000002fc00000000ffffffff0000000000fffffffc0200000001fb00000012005300630065006e0065005400720065006501000000000000039f0000000000000000fb0000001a0044006f00630075006d0065006e0074006100740069006f006e0000000000ffffffff0000000000000000000000010000023a000002f1fc0200000001fb0000001400540065007800740045006400690074006f00720100000016000002f10000008900ffffff000000020000078000000242fc0100000001fb0000001e00480074006d006c0052006f0062006f007400570069006e0064006f007700000000000000078000000000000000000000000300000780000000e3fc0100000002fb0000000e0043006f006e0073006f006c006501000000000000073f0000000000000000fb0000001a0043006f006e0073006f006c00650041006c006c0041006c006c0100000000000007800000006900ffffff00000544000002f100000004000000040000000100000008fc00000000
+perspectives: 000000ff00000000fd000000030000000000000069000003ecfc0100000003fc00000000ffffffff0000000000fffffffc0200000001fb00000012005300630065006e0065005400720065006501000000000000039f0000000000000000fb0000001a0044006f00630075006d0065006e0074006100740069006f006e0000000000ffffffff0000000000000000fb0000001e00480074006d006c0052006f0062006f007400570069006e0064006f00770000000000000000690000006900ffffff000000010000023a000003ecfc0200000001fb0000001400540065007800740045006400690074006f0072010000001a000003ec0000008700ffffff0000000300000716000000e3fc0100000002fb0000000e0043006f006e0073006f006c006501000000000000073f0000000000000000fb0000001a0043006f006e0073006f006c00650041006c006c0041006c006c010000006b00000716000000000000000000000545000003ec00000004000000040000000100000008fc00000000
 simulationViewPerspectives: 000000ff00000001000000020000016c000004a00100000002010000000101
 sceneTreePerspectives: 000000ff0000000100000002000000c0000001120100000002010000000201
+minimizedPerspectives: 000000ff00000000fd000000030000000000000069000003ecfc0100000003fc00000000ffffffff0000000000fffffffc0200000001fb00000012005300630065006e0065005400720065006501000000000000039f0000000000000000fb0000001a0044006f00630075006d0065006e0074006100740069006f006e0000000000ffffffff0000000000000000fb0000001e00480074006d006c0052006f0062006f007400570069006e0064006f00770100000000000000690000006900ffffff000000010000023a00000307fc0200000001fb0000001400540065007800740045006400690074006f0072010000001a000003070000008700ffffff0000000300000716000000e3fc0100000002fb0000000e0043006f006e0073006f006c006501000000000000073f0000000000000000fb0000001a0043006f006e0073006f006c00650041006c006c0041006c006c010000006b000007160000006900ffffff000004da0000030700000004000000040000000100000008fc00000000
 maximizedDockId: -1
 centralWidgetVisible: 1
 projectionMode: PERSPECTIVE
@@ -9,5 +10,4 @@ renderingMode: PLAIN
 orthographicViewHeight: 1.99796
 textFiles: 0 "controllers/LidarStuff/LidarStuff.py"
 globalOptionalRendering: LidarRaysPaths::LidarPointClouds::DistanceSensorRays
-consoles: Console:All:All
 renderingDevicePerspectives: robot1:camera;1;1.40625;0;0