Face Recognition

Task 6.1

Collecting Samples

import cv2
import numpy as np

# Load HAAR face classifier
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Load functions
def face_extractor(img):
    # Function detects faces and returns the cropped face
    # If no face is detected, it returns None
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return None
    # Crop all faces found
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

# Initialize Webcam
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from webcam input
while True:
    ret, frame = cap.read()
    face = face_extractor(frame)
    if face is not None:
        count += 1
        face = cv2.resize(face, (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Save file in specified directory with unique name
        file_name_path = '/home/student/Desktop/face/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Put count on images and display live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")
    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting Samples of face 1 Completed")

Using this code, 100 samples of my face were collected.
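
As a quick sanity check before training (a minimal sketch, assuming the samples were saved to the same path used above), the number of collected images can be counted:

import os

# Hedged sketch: count the .jpg samples saved by the collection script above
data_path = '/home/student/Desktop/face/'
samples = [f for f in os.listdir(data_path) if f.endswith('.jpg')]
print("Collected", len(samples), "face samples")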

Training a model:

import cv2
import numpy as np
from os import listdir
from os.path import isfile, join

# Get the training data we previously made
data_path = '/home/student/Desktop/face/'
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]

# Create arrays for training data and labels
Training_Data, Labels = [], []

# Open training images in our datapath and build numpy arrays
for i, files in enumerate(onlyfiles):
    image_path = data_path + onlyfiles[i]
    images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    Training_Data.append(np.asarray(images, dtype=np.uint8))
    Labels.append(i)

# Create a numpy array for the labels
Labels = np.asarray(Labels, dtype=np.int32)

# Initialize the LBPH facial recognizer
# NOTE: requires the contrib modules: pip install opencv-contrib-python
# (older OpenCV 3.x versions expose this as cv2.face.createLBPHFaceRecognizer())
srilekha_model = cv2.face.LBPHFaceRecognizer_create()

# Let's train our model
srilekha_model.train(np.asarray(Training_Data), np.asarray(Labels))
print("Model trained successfully")

Output:

[Screenshot of the training output]
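
The recognition scripts below reuse srilekha_model from the training session. If the recognition code is run as a separate script, a minimal sketch for persisting and reloading the trained model could look like this (the file name srilekha_model.yml is an assumption, not part of the original code):

# Hedged sketch: save the trained LBPH model after training
srilekha_model.write('srilekha_model.yml')  # file name is an assumption

# ...later, in the recognition script, reload it instead of retraining
srilekha_model = cv2.face.LBPHFaceRecognizer_create()
srilekha_model.read('srilekha_model.yml')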

Face Recognition

import cv2
import numpy as np
import os
import time
import pywhatkit as pw
import smtplib

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_detector(img, size=0.5):
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi = img[y:y+h, x:x+w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi

# Open Webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)
    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Pass face to prediction model
        # "results" is a tuple containing the label and the confidence value
        results = srilekha_model.predict(face)
        if results[1] < 500:
            confidence = int(100 * (1 - (results[1]) / 400))
            display_string = str(confidence) + '% Confident it is User'
            cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)
            if confidence > 80:
                cv2.putText(image, "Hey srilekha", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Recognition', image)
                print("model1 detected")
                # Schedule a WhatsApp message two minutes from now
                t = time.localtime()
                pw.sendwhatmsg('+phone_number', 'face detected', t[3], t[4]+2)
                # Create SMTP session
                s = smtplib.SMTP('smtp.gmail.com', 587)
                # Start TLS for security
                s.starttls()
                # Authentication
                s.login("[email protected]", "password")
                # Message to be sent
                message = "This is face of Srilekha"
                # Send the mail
                s.sendmail("[email protected]", "[email protected]", message)
                # Terminate the session
                s.quit()
                break
    except:
        cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, "looking for face", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)
    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

Output:

[Screenshot of the recognition window]

WhatsApp message:

[Screenshot]

Email:

[Screenshot]

Task 6.2

When the second face (in my case, my own face again) was detected, an AWS instance was launched:

import cv2
import numpy as np
import os

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_detector(img, size=0.5):
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        return img, []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi = img[y:y+h, x:x+w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi

# Open Webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)
    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Pass face to prediction model
        # "results1" is a tuple containing the label and the confidence value
        results1 = srilekha_model.predict(face)
        # harry_model.predict(face)
        if results1[1] < 500:
            confidence1 = int(100 * (1 - (results1[1]) / 400))
            display_string = str(confidence1) + '% Confident it is User'
            cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)
            if confidence1 > 80:
                cv2.putText(image, "Hey Srilekha", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                cv2.imshow('Face Recognition', image)
                # Launch the AWS infrastructure described by the Terraform code below
                os.system('terraform init')
                os.system('terraform apply --auto-approve')
                break
    except:
        cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, "looking for face", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)
    if cv2.waitKey(1) == 13:  # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()

The Terraform code is:

provider "aws"{

 region="ap-south-1"

 profile="default"

}

resource "aws_instance" "os1"{

  ami="ami-0ad704c126371a549"

  instance_type="t2.micro"

}

resource "aws_ebs_volume" "storage"{

  availability_zone=aws_instance.os1.availability_zone

  size=5

}

resource "aws_volume_attachment" "storage_attach"{

    device_name="/dev/sdh"

    volume_id=aws_ebs_volume.storage.id

    instance_id=aws_instance.os1.id

}
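
After testing, the launched resources can be torn down the same way they were created. A minimal sketch (run from the same directory as the Terraform files) could be:

import os

# Hedged sketch: destroy the EC2 instance and EBS volume created above
os.system('terraform destroy --auto-approve')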

Before:

[Screenshots of the AWS console before detection]

After:

[Screenshots of the AWS console showing the launched instance and attached EBS volume]


Thank you!
