The document shows code that uses the Face and Computer Vision APIs to analyze an image. It detects faces in the image and extracts attributes such as age, gender, and emotion. It also performs optical character recognition (OCR) on the image and extracts the recognized lines of text. The attributes and text are then stored in lists for further processing.
1 of 15
More Related Content
Xamarin で Cognitive Services を使ってみよう
// Slide 7: detect faces in an image with the Face API and collect
// age / gender / happiness for each detected face.

// Build the Face API client; the key and endpoint come from the Azure portal.
FaceClient faceClient = new FaceClient(
    new ApiKeyServiceClientCredentials("<Face API Key>"),
    new System.Net.Http.DelegatingHandler[] { });
faceClient.Endpoint = "<Face API Endpoint>";

// Attributes the service should compute for every detected face.
FaceAttributeType[] faceAttributes =
{
    FaceAttributeType.Age,
    FaceAttributeType.Gender,
    FaceAttributeType.Emotion
};

// Run detection against the image URL.
// Positional flags: returnFaceId = true, returnFaceLandmarks = false.
IList<DetectedFace> faceList = await faceClient.Face.DetectWithUrlAsync(
    "<Image URL>", true, false, faceAttributes);

// Project each detection into the app's view-model type.
// Happiness comes back as 0..1 from the service; scale it to 0..100.
List<FaceEmotion> emotions = new List<FaceEmotion>();
foreach (var detected in faceList)
{
    var attributes = detected.FaceAttributes;
    emotions.Add(new FaceEmotion
    {
        Age = attributes.Age,
        Gender = ((Gender)attributes.Gender).ToString(),
        Happiness = attributes.Emotion.Happiness * 100d
    });
}
// Slide 8: recognize handwritten text in an image with the Computer Vision API.
//
// FIX: the original constructed the Computer Vision client with the
// "<Face API Key>" / "<Face API Endpoint>" placeholders — a copy-paste bug;
// each Cognitive Service needs its own key and endpoint.
// FIX: numberOfCharsInOperationId was used but never defined; the operation id
// is the GUID (36 chars) at the end of the Operation-Location URL.
const int numberOfCharsInOperationId = 36;

var cvClient = new ComputerVisionClient(
    new ApiKeyServiceClientCredentials("<Computer Vision API Key>"),
    new System.Net.Http.DelegatingHandler[] { });
cvClient.Endpoint = "<Computer Vision API Endpoint>";

// Start the asynchronous text-recognition operation. The service returns no
// body here — only an Operation-Location header pointing at the result.
var textHeaders = await cvClient.RecognizeTextAsync("<Image URL>", TextRecognitionMode.Handwritten);
var operationLocation = textHeaders.OperationLocation;
var operationId = operationLocation.Substring(operationLocation.Length - numberOfCharsInOperationId);

// FIX: recognition runs asynchronously on the service side, so a single
// immediate fetch may find the operation still NotStarted/Running and return
// no lines. Poll (bounded) until the operation completes, as in the official
// Microsoft sample.
var result = await cvClient.GetTextOperationResultAsync(operationId);
int retries = 0;
while ((result.Status == TextOperationStatusCodes.Running ||
        result.Status == TextOperationStatusCodes.NotStarted) && retries++ < 10)
{
    await System.Threading.Tasks.Task.Delay(1000);
    result = await cvClient.GetTextOperationResultAsync(operationId);
}
var lines = result.RecognitionResult.Lines;

// Concatenate all recognized lines into a single string.
var sb = new StringBuilder();
foreach (var line in lines)
{
    sb.Append(line.Text);
}