
Searching stored videos for faces

You can search a collection for faces that match the faces of persons detected in a stored video or a streaming video. This section covers searching for faces in a stored video. For information about searching for faces in a streaming video, see Working with streaming video events.

The faces that you search for must first be indexed into a collection by using IndexFaces. For more information, see Adding faces to a collection.
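
For example, with the AWS SDK for Python (Boto3), the indexing step might look like the following minimal sketch. The collection, bucket, and image names are placeholders for illustration, not values from this guide.

    import boto3

    rekognition = boto3.client('rekognition')

    # Placeholder collection name for illustration only.
    collection_id = 'my-collection'

    # Create the collection (one-time setup).
    rekognition.create_collection(CollectionId=collection_id)

    # Index a face from an image stored in Amazon S3 so that it can be
    # matched later by StartFaceSearch.
    response = rekognition.index_faces(
        CollectionId=collection_id,
        Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'person.jpg'}},
        ExternalImageId='person.jpg',
        MaxFaces=1)

    for record in response['FaceRecords']:
        print('Indexed face: ' + record['Face']['FaceId'])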

Amazon Rekognition Video face searching follows the same asynchronous workflow as the other Amazon Rekognition Video operations that analyze videos stored in an Amazon S3 bucket. To start searching for faces in a stored video, call StartFaceSearch and provide the ID of the collection that you want to search. Amazon Rekognition Video publishes the completion status of the video analysis to an Amazon Simple Notification Service (Amazon SNS) topic. If the video analysis succeeds, call GetFaceSearch to get the search results. For more information about starting video analysis and getting the results, see Calling Amazon Rekognition Video operations.
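
As a rough sketch of this workflow with the AWS SDK for Python (Boto3), the job status can also be polled directly with GetFaceSearch instead of waiting for the Amazon SNS notification. The bucket, video, and collection names below are placeholders.

    import time
    import boto3

    rekognition = boto3.client('rekognition')

    # Placeholder bucket, video, and collection names for illustration only.
    response = rekognition.start_face_search(
        Video={'S3Object': {'Bucket': 'my-bucket', 'Name': 'video.mp4'}},
        CollectionId='my-collection')
    job_id = response['JobId']

    # Poll until the analysis finishes. The Amazon SNS workflow described
    # above is the recommended approach; polling is shown only for brevity.
    while True:
        result = rekognition.get_face_search(JobId=job_id)
        if result['JobStatus'] != 'IN_PROGRESS':
            break
        time.sleep(5)

    print('Job ' + job_id + ' completed with status: ' + result['JobStatus'])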

The following procedure shows how to search a collection for faces that match the faces of persons detected in a video. The procedure also shows how to get the tracking data for persons matched in the video. The procedure expands on the code in Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK), which uses an Amazon Simple Queue Service (Amazon SQS) queue to get the completion status of a video analysis request.

To search a video for matching faces (SDK)
  1. Create a collection.

  2. Index faces into the collection.

  3. Perform Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK).

  4. Add the following code to the class VideoDetect that you created in step 3.

    Java
    //Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    //PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)

    //Face collection search in video ==================================================================
    private static void StartFaceSearchCollection(String bucket, String video, String collection) throws Exception{

        NotificationChannel channel= new NotificationChannel()
                .withSNSTopicArn(snsTopicArn)
                .withRoleArn(roleArn);

        StartFaceSearchRequest req = new StartFaceSearchRequest()
                .withCollectionId(collection)
                .withVideo(new Video()
                        .withS3Object(new S3Object()
                                .withBucket(bucket)
                                .withName(video)))
                .withNotificationChannel(channel);

        StartFaceSearchResult startPersonCollectionSearchResult = rek.startFaceSearch(req);
        startJobId=startPersonCollectionSearchResult.getJobId();
    }

    //Face collection search in video ==================================================================
    private static void GetFaceSearchCollectionResults() throws Exception{
        GetFaceSearchResult faceSearchResult=null;
        int maxResults=10;
        String paginationToken=null;

        do {
            if (faceSearchResult !=null){
                paginationToken = faceSearchResult.getNextToken();
            }

            faceSearchResult = rek.getFaceSearch(
                    new GetFaceSearchRequest()
                            .withJobId(startJobId)
                            .withMaxResults(maxResults)
                            .withNextToken(paginationToken)
                            .withSortBy(FaceSearchSortBy.TIMESTAMP));

            VideoMetadata videoMetaData=faceSearchResult.getVideoMetadata();

            System.out.println("Format: " + videoMetaData.getFormat());
            System.out.println("Codec: " + videoMetaData.getCodec());
            System.out.println("Duration: " + videoMetaData.getDurationMillis());
            System.out.println("FrameRate: " + videoMetaData.getFrameRate());
            System.out.println();

            //Show search results
            List<PersonMatch> matches= faceSearchResult.getPersons();

            for (PersonMatch match: matches) {
                long milliSeconds=match.getTimestamp();
                System.out.print("Timestamp: " + Long.toString(milliSeconds));
                System.out.println(" Person number: " + match.getPerson().getIndex());

                List <FaceMatch> faceMatches = match.getFaceMatches();

                if (faceMatches != null) {
                    System.out.println("Matches in collection...");

                    for (FaceMatch faceMatch: faceMatches){
                        Face face=faceMatch.getFace();
                        System.out.println("Face Id: "+ face.getFaceId());
                        System.out.println("Similarity: " + faceMatch.getSimilarity().toString());
                        System.out.println();
                    }
                }
                System.out.println();
            }
            System.out.println();
        } while (faceSearchResult !=null && faceSearchResult.getNextToken() != null);
    }

    In the function main, replace the lines:

    StartLabelDetection(bucket, video);

    if (GetSQSMessageSuccess()==true)
        GetLabelDetectionResults();

    with:

    String collection="collection";
    StartFaceSearchCollection(bucket, video, collection);

    if (GetSQSMessageSuccess()==true)
        GetFaceSearchCollectionResults();
    Java V2

    This code is taken from the AWS Documentation SDK examples GitHub repository. See the full example here.

    import software.amazon.awssdk.regions.Region;
    import software.amazon.awssdk.services.rekognition.RekognitionClient;
    import software.amazon.awssdk.services.rekognition.model.*;
    import java.util.List;

    /**
     * Before running this Java V2 code example, set up your development
     * environment, including your credentials.
     *
     * For more information, see the following documentation topic:
     *
     * https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/get-started.html
     */
    public class VideoDetectFaces {
        private static String startJobId = "";

        public static void main(String[] args) {
            final String usage = """

                Usage:    <bucket> <video> <topicArn> <roleArn>

                Where:
                   bucket - The name of the bucket in which the video is located (for example, myBucket).\s
                   video - The name of the video (for example, people.mp4).\s
                   topicArn - The ARN of the Amazon Simple Notification Service (Amazon SNS) topic.\s
                   roleArn - The ARN of the AWS Identity and Access Management (IAM) role to use.\s
                """;

            if (args.length != 4) {
                System.out.println(usage);
                System.exit(1);
            }

            String bucket = args[0];
            String video = args[1];
            String topicArn = args[2];
            String roleArn = args[3];
            Region region = Region.US_EAST_1;
            RekognitionClient rekClient = RekognitionClient.builder()
                    .region(region)
                    .build();

            NotificationChannel channel = NotificationChannel.builder()
                    .snsTopicArn(topicArn)
                    .roleArn(roleArn)
                    .build();

            startFaceDetection(rekClient, channel, bucket, video);
            getFaceResults(rekClient);
            System.out.println("This example is done!");
            rekClient.close();
        }

        public static void startFaceDetection(RekognitionClient rekClient,
                                              NotificationChannel channel,
                                              String bucket,
                                              String video) {
            try {
                S3Object s3Obj = S3Object.builder()
                        .bucket(bucket)
                        .name(video)
                        .build();

                Video vidOb = Video.builder()
                        .s3Object(s3Obj)
                        .build();

                StartFaceDetectionRequest faceDetectionRequest = StartFaceDetectionRequest.builder()
                        .jobTag("Faces")
                        .faceAttributes(FaceAttributes.ALL)
                        .notificationChannel(channel)
                        .video(vidOb)
                        .build();

                StartFaceDetectionResponse startFaceDetectionResponse = rekClient.startFaceDetection(faceDetectionRequest);
                startJobId = startFaceDetectionResponse.jobId();

            } catch (RekognitionException e) {
                System.out.println(e.getMessage());
                System.exit(1);
            }
        }

        public static void getFaceResults(RekognitionClient rekClient) {
            try {
                String paginationToken = null;
                GetFaceDetectionResponse faceDetectionResponse = null;
                boolean finished = false;
                String status;
                int yy = 0;

                do {
                    if (faceDetectionResponse != null)
                        paginationToken = faceDetectionResponse.nextToken();

                    GetFaceDetectionRequest recognitionRequest = GetFaceDetectionRequest.builder()
                            .jobId(startJobId)
                            .nextToken(paginationToken)
                            .maxResults(10)
                            .build();

                    // Wait until the job succeeds.
                    while (!finished) {
                        faceDetectionResponse = rekClient.getFaceDetection(recognitionRequest);
                        status = faceDetectionResponse.jobStatusAsString();
                        if (status.compareTo("SUCCEEDED") == 0)
                            finished = true;
                        else {
                            System.out.println(yy + " status is: " + status);
                            Thread.sleep(1000);
                        }
                        yy++;
                    }
                    finished = false;

                    // Proceed when the job is done - otherwise VideoMetadata is null.
                    VideoMetadata videoMetaData = faceDetectionResponse.videoMetadata();
                    System.out.println("Format: " + videoMetaData.format());
                    System.out.println("Codec: " + videoMetaData.codec());
                    System.out.println("Duration: " + videoMetaData.durationMillis());
                    System.out.println("FrameRate: " + videoMetaData.frameRate());
                    System.out.println("Job");

                    // Show face information.
                    List<FaceDetection> faces = faceDetectionResponse.faces();
                    for (FaceDetection face : faces) {
                        String age = face.face().ageRange().toString();
                        String smile = face.face().smile().toString();
                        System.out.println("The detected face is estimated to be " + age + " years old.");
                        System.out.println("There is a smile : " + smile);
                    }

                } while (faceDetectionResponse != null && faceDetectionResponse.nextToken() != null);

            } catch (RekognitionException | InterruptedException e) {
                System.out.println(e.getMessage());
                System.exit(1);
            }
        }
    }
    Python
    #Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
    #PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)

    # ============== Face Search ===============
    def StartFaceSearchCollection(self, collection):
        response = self.rek.start_face_search(Video={'S3Object': {'Bucket': self.bucket, 'Name': self.video}},
                                              CollectionId=collection,
                                              NotificationChannel={'RoleArn': self.roleArn, 'SNSTopicArn': self.snsTopicArn})

        self.startJobId = response['JobId']
        print('Start Job Id: ' + self.startJobId)

    def GetFaceSearchCollectionResults(self):
        maxResults = 10
        paginationToken = ''
        finished = False

        while finished == False:
            response = self.rek.get_face_search(JobId=self.startJobId,
                                                MaxResults=maxResults,
                                                NextToken=paginationToken)

            print(response['VideoMetadata']['Codec'])
            print(str(response['VideoMetadata']['DurationMillis']))
            print(response['VideoMetadata']['Format'])
            print(response['VideoMetadata']['FrameRate'])

            for personMatch in response['Persons']:
                print('Person Index: ' + str(personMatch['Person']['Index']))
                print('Timestamp: ' + str(personMatch['Timestamp']))

                if ('FaceMatches' in personMatch):
                    for faceMatch in personMatch['FaceMatches']:
                        print('Face ID: ' + faceMatch['Face']['FaceId'])
                        print('Similarity: ' + str(faceMatch['Similarity']))
                print()

            if 'NextToken' in response:
                paginationToken = response['NextToken']
            else:
                finished = True
            print()

    In the function main, replace the lines:

    analyzer.StartLabelDetection()
    if analyzer.GetSQSMessageSuccess()==True:
        analyzer.GetLabelDetectionResults()

    with:

    collection='tests'
    analyzer.StartFaceSearchCollection(collection)
    if analyzer.GetSQSMessageSuccess()==True:
        analyzer.GetFaceSearchCollectionResults()

    If you have already run a video example other than Analyzing a video stored in an Amazon S3 bucket with Java or Python (SDK), the code to replace might be different.

  5. Change the value of collection to the name of the collection that you created in step 1.

  6. Run the code. A list of persons in the video whose faces match faces in the input collection is displayed. The tracking data for each matched person is also displayed.

GetFaceSearch operation response

The following is an example JSON response from GetFaceSearch.

The response includes an array of persons (Persons) detected in the video whose faces match a face in the input collection. An array element, PersonMatch, exists for each time the person is matched in the video. Each PersonMatch includes an array of face matches from the input collection (FaceMatch), information about the matched person (PersonDetail), and the time at which the person was matched in the video.

{ "JobStatus": "SUCCEEDED", "NextToken": "IJdbzkZfvBRqj8GPV82BPiZKkLOGCqDIsNZG/gQsEE5faTVK9JHOz/xxxxxxxxxxxxxxx", "Persons": [ { "FaceMatches": [ { "Face": { "BoundingBox": { "Height": 0.527472972869873, "Left": 0.33530598878860474, "Top": 0.2161169946193695, "Width": 0.35503000020980835 }, "Confidence": 99.90239715576172, "ExternalImageId": "image.PNG", "FaceId": "a2f2e224-bfaa-456c-b360-7c00241e5e2d", "ImageId": "eb57ed44-8d8d-5ec5-90b8-6d190daff4c3" }, "Similarity": 98.40909576416016 } ], "Person": { "BoundingBox": { "Height": 0.8694444298744202, "Left": 0.2473958283662796, "Top": 0.10092592239379883, "Width": 0.49427083134651184 }, "Face": { "BoundingBox": { "Height": 0.23000000417232513, "Left": 0.42500001192092896, "Top": 0.16333332657814026, "Width": 0.12937499582767487 }, "Confidence": 99.97504425048828, "Landmarks": [ { "Type": "eyeLeft", "X": 0.46415066719055176, "Y": 0.2572723925113678 }, { "Type": "eyeRight", "X": 0.5068183541297913, "Y": 0.23705792427062988 }, { "Type": "nose", "X": 0.49765899777412415, "Y": 0.28383663296699524 }, { "Type": "mouthLeft", "X": 0.487221896648407, "Y": 0.3452930748462677 }, { "Type": "mouthRight", "X": 0.5142884850502014, "Y": 0.33167609572410583 } ], "Pose": { "Pitch": 15.966927528381348, "Roll": -15.547388076782227, "Yaw": 11.34195613861084 }, "Quality": { "Brightness": 44.80223083496094, "Sharpness": 99.95819854736328 } }, "Index": 0 }, "Timestamp": 0 }, { "Person": { "BoundingBox": { "Height": 0.2177777737379074, "Left": 0.7593749761581421, "Top": 0.13333334028720856, "Width": 0.12250000238418579 }, "Face": { "BoundingBox": { "Height": 0.2177777737379074, "Left": 0.7593749761581421, "Top": 0.13333334028720856, "Width": 0.12250000238418579 }, "Confidence": 99.63436889648438, "Landmarks": [ { "Type": "eyeLeft", "X": 0.8005779385566711, "Y": 0.20915353298187256 }, { "Type": "eyeRight", "X": 0.8391435146331787, "Y": 0.21049551665782928 }, { "Type": "nose", "X": 0.8191410899162292, "Y": 0.2523227035999298 }, { "Type": "mouthLeft", "X": 0.8093273043632507, "Y": 0.29053622484207153 }, { "Type": "mouthRight", "X": 0.8366993069648743, "Y": 0.29101791977882385 } ], "Pose": { "Pitch": 3.165884017944336, "Roll": 1.4182015657424927, "Yaw": -11.151537895202637 }, "Quality": { "Brightness": 28.910892486572266, "Sharpness": 97.61507415771484 } }, "Index": 1 }, "Timestamp": 0 }, { "Person": { "BoundingBox": { "Height": 0.8388888835906982, "Left": 0, "Top": 0.15833333134651184, "Width": 0.2369791716337204 }, "Face": { "BoundingBox": { "Height": 0.20000000298023224, "Left": 0.029999999329447746, "Top": 0.2199999988079071, "Width": 0.11249999701976776 }, "Confidence": 99.85971069335938, "Landmarks": [ { "Type": "eyeLeft", "X": 0.06842322647571564, "Y": 0.3010137975215912 }, { "Type": "eyeRight", "X": 0.10543643683195114, "Y": 0.29697132110595703 }, { "Type": "nose", "X": 0.09569807350635529, "Y": 0.33701086044311523 }, { "Type": "mouthLeft", "X": 0.0732642263174057, "Y": 0.3757539987564087 }, { "Type": "mouthRight", "X": 0.10589495301246643, "Y": 0.3722417950630188 } ], "Pose": { "Pitch": -0.5589138865470886, "Roll": -5.1093974113464355, "Yaw": 18.69594955444336 }, "Quality": { "Brightness": 43.052337646484375, "Sharpness": 99.68138885498047 } }, "Index": 2 }, "Timestamp": 0 }...... ], "VideoMetadata": { "Codec": "h264", "DurationMillis": 67301, "Format": "QuickTime / MOV", "FrameHeight": 1080, "FrameRate": 29.970029830932617, "FrameWidth": 1920 } }