【Google Cloud】Emotional score
Emotional scoreというWebアプリを開発しましたので、その紹介をします。
1.基本動作
・画像をAIが分析し、喜び・悲しみ・怒り・驚きの感情を100点満点で点数化します。
・詳細な評価結果を、音声出力することができます。
※デモ動画で利用している写真は全て無料素材です。
2.仕組み
・システムアーキテクチャ、プロジェクトの機能と特徴、操作方法、sample_code.gsは以下のとおりです。
<sample_code.gs>
// Configuration constants (placeholder values to be filled in by the deployer).
// NOTE(review): these are secrets/IDs hard-coded in source — consider storing
// them with PropertiesService.getScriptProperties() so they are not committed
// alongside the code.
const SPREADSHEET_ID = 'SPREADSHEETのID'; // target spreadsheet ID
const SHEET_NAME = 'シート名'; // sheet (tab) name within the spreadsheet
const DRIVE_FOLDER_ID = 'google driveのフォルダID'; // Drive folder that receives uploaded images
const API_KEY = 'google api key'; // Google Cloud Vision API key
const VOICEVOX_API_KEY = 'VOICEVOX_API_KEY'; // VOICEVOX API key (returned to the client by getVoiceVoxApiKey)
/**
 * Web-app entry point: serves the single-page UI from index.html.
 * @returns {GoogleAppsScript.HTML.HtmlOutput} the rendered page.
 */
function doGet() {
  const page = HtmlService.createHtmlOutputFromFile('index');
  return page;
}
/**
 * Returns the VOICEVOX API key to the client (called via google.script.run).
 * NOTE(review): returning a secret to the browser makes it visible to every
 * user of the page — consider proxying VOICEVOX requests server-side instead.
 * @returns {string} the VOICEVOX API key.
 */
function getVoiceVoxApiKey() {
return VOICEVOX_API_KEY;
}
/**
 * Saves the uploaded image to Drive, analyzes it with the Vision API, and
 * persists the processed result to the spreadsheet.
 * @param {string} imageData - data URL ("data:image/png;base64,....").
 * @returns {Object} analysis record built by processVisionResponse.
 * @throws {Error} when the payload is not a base64 data URL or the Vision API
 *   request fails.
 */
function analyzeImage(imageData) {
  const base64Body = (imageData || '').split(',')[1];
  if (!base64Body) {
    throw new Error('analyzeImage: expected a base64 data URL');
  }
  const fileName = `image_${new Date().getTime()}.png`;
  // Keep a copy of the original upload in Drive (side effect only; the
  // created file handle is not needed afterwards).
  const blob = Utilities.newBlob(Utilities.base64Decode(base64Body), 'image/png', fileName);
  DriveApp.getFolderById(DRIVE_FOLDER_ID).createFile(blob);
  const visionUrl = `https://vision.googleapis.com/v1/images:annotate?key=${API_KEY}`;
  const requestPayload = {
    requests: [{
      image: {
        content: base64Body
      },
      features: [
        { type: 'LABEL_DETECTION' },
        { type: 'FACE_DETECTION', maxResults: 30 },
        { type: 'LANDMARK_DETECTION' },
        { type: 'LOGO_DETECTION' },
        { type: 'TEXT_DETECTION' },
        { type: 'SAFE_SEARCH_DETECTION' },
        { type: 'IMAGE_PROPERTIES' },
        { type: 'WEB_DETECTION' }
      ]
    }]
  };
  // muteHttpExceptions lets us surface the API's error body in a meaningful
  // message instead of an opaque fetch exception.
  const response = UrlFetchApp.fetch(visionUrl, {
    method: 'post',
    contentType: 'application/json',
    payload: JSON.stringify(requestPayload),
    muteHttpExceptions: true
  });
  if (response.getResponseCode() !== 200) {
    throw new Error(`Vision API request failed (${response.getResponseCode()}): ${response.getContentText()}`);
  }
  const responseData = JSON.parse(response.getContentText());
  const analysisResult = processVisionResponse(responseData, fileName);
  saveAnalysisResult(analysisResult);
  return analysisResult;
}
/**
 * Flattens a Vision API batch response into a row-friendly record.
 * @param {Object} responseData - parsed Vision API JSON (responses[0] is used).
 * @param {string} fileName - Drive file name of the analyzed image.
 * @returns {Object} record with a JST timestamp, the raw face annotations,
 *   an averaged emotion-score string, and JSON-stringified annotation lists.
 */
function processVisionResponse(responseData, fileName) {
  const annotation = responseData.responses[0];
  const faces = annotation.faceAnnotations || [];
  // Timestamp in Japan Standard Time, formatted "YYYY/MM/DD HH:MM:SS".
  const timestamp = new Date().toLocaleString("ja-JP", { timeZone: "Asia/Tokyo", hour12: false });
  return {
    timestamp: timestamp,
    fileName: fileName,
    emotions: faces, // per-person likelihood data, kept verbatim
    faceScoreText: calculateFaceScore(processEmotions(faces)), // averaged score string
    labels: JSON.stringify(annotation.labelAnnotations || []),
    landmarks: JSON.stringify(annotation.landmarkAnnotations || []),
    logos: JSON.stringify(annotation.logoAnnotations || []),
    texts: JSON.stringify(annotation.textAnnotations || []),
    safeSearch: JSON.stringify(annotation.safeSearchAnnotation || {}),
    dominantColors: JSON.stringify(annotation.imagePropertiesAnnotation?.dominantColors || []),
    webEntities: JSON.stringify(annotation.webDetection?.webEntities || [])
  };
}
/**
 * Renders face annotations as one "Person N: Joy - X, ..." summary string,
 * with people separated by " | ".
 * @param {Array<Object>} faceAnnotations - Vision faceAnnotations entries.
 * @returns {string} human-readable per-person emotion summary ('' when empty).
 */
function formatEmotions(faceAnnotations) {
  const summaries = [];
  faceAnnotations.forEach((face, i) => {
    summaries.push(
      `Person ${i + 1}: Joy - ${face.joyLikelihood}, Sorrow - ${face.sorrowLikelihood}, Anger - ${face.angerLikelihood}, Surprise - ${face.surpriseLikelihood}`
    );
  });
  return summaries.join(' | ');
}
/**
 * Averages per-face emotion likelihoods into one numeric score per emotion.
 * @param {Array<Object>} faceAnnotations - Vision faceAnnotations entries.
 * @returns {{Joy:number,Sorrow:number,Anger:number,Surprise:number}} averages
 *   on a 0-100 scale (each 0 when no faces were detected).
 */
function processEmotions(faceAnnotations) {
  const fieldByEmotion = [
    ['Joy', 'joyLikelihood'],
    ['Sorrow', 'sorrowLikelihood'],
    ['Anger', 'angerLikelihood'],
    ['Surprise', 'surpriseLikelihood']
  ];
  const averaged = {};
  for (const [emotion, field] of fieldByEmotion) {
    const scores = faceAnnotations.map((face) => likelihoodToScore(face[field]));
    averaged[emotion] = calculateAverage(scores);
  }
  return averaged;
}
/**
 * Maps a Vision likelihood enum value to a 0-100 score.
 * Any unrecognized value (e.g. 'UNKNOWN') maps to 0.
 * @param {string} likelihood - Vision likelihood string.
 * @returns {number} score in [0, 100].
 */
function likelihoodToScore(likelihood) {
  const scoreByLikelihood = new Map([
    ['VERY_UNLIKELY', 0],
    ['UNLIKELY', 50],
    ['POSSIBLE', 80],
    ['LIKELY', 90],
    ['VERY_LIKELY', 100]
  ]);
  return scoreByLikelihood.get(likelihood) ?? 0;
}
/**
 * Arithmetic mean of a list of numbers; 0 for an empty list.
 * @param {number[]} scores - values to average.
 * @returns {number} the mean value.
 */
function calculateAverage(scores) {
  if (scores.length === 0) return 0;
  let total = 0;
  for (const score of scores) {
    total += score;
  }
  return total / scores.length;
}
/**
 * Formats averaged emotion scores as a "Joy: x, Sorrow: y, ..." string.
 * @param {{Joy:number,Sorrow:number,Anger:number,Surprise:number}} emotions
 * @returns {string} display string in fixed Joy/Sorrow/Anger/Surprise order.
 */
function calculateFaceScore(emotions) {
  const order = ['Joy', 'Sorrow', 'Anger', 'Surprise'];
  return order.map((name) => `${name}: ${emotions[name]}`).join(', ');
}
/**
 * Appends one analysis result as an 11-column row to the results sheet.
 * Columns: timestamp, file name, per-person emotions, averaged score text,
 * labels, landmarks, logos, texts, safe search, dominant colors, web entities.
 * @param {Object} analysisResult - record built by processVisionResponse.
 * @throws {Error} when the configured sheet does not exist.
 */
function saveAnalysisResult(analysisResult) {
  const sheet = SpreadsheetApp.openById(SPREADSHEET_ID).getSheetByName(SHEET_NAME);
  if (!sheet) {
    // getSheetByName returns null for a missing tab; fail loudly instead of
    // throwing a confusing null-dereference below.
    throw new Error(`Sheet "${SHEET_NAME}" not found in spreadsheet ${SPREADSHEET_ID}`);
  }
  // Reuse formatEmotions instead of duplicating the per-person formatting
  // inline (the previous copy here had drifted into dead-code duplication).
  const formattedEmotions = formatEmotions(analysisResult.emotions);
  // appendRow writes after the last row with content — same effect as the
  // former getLastRow() + 1 / setValues dance, in a single call.
  sheet.appendRow([
    analysisResult.timestamp,
    analysisResult.fileName,
    formattedEmotions,
    analysisResult.faceScoreText,
    analysisResult.labels,
    analysisResult.landmarks,
    analysisResult.logos,
    analysisResult.texts,
    analysisResult.safeSearch,
    analysisResult.dominantColors,
    analysisResult.webEntities
  ]);
}
/**
 * Stores an annotated copy of the image in the Drive folder as
 * "annotated_<fileName>".
 * @param {string} imageData - data URL with a base64 payload.
 * @param {string} fileName - original file name, used as the suffix.
 */
function saveAnnotatedImage(imageData, fileName) {
  const base64Body = imageData.split(',')[1];
  const annotatedBlob = Utilities.newBlob(
    Utilities.base64Decode(base64Body),
    'image/png',
    `annotated_${fileName}`
  );
  const folder = DriveApp.getFolderById(DRIVE_FOLDER_ID);
  folder.createFile(annotatedBlob);
}
<sample_cloudrun_index.js>
// Cloud Run service setup: receives a base64 image, stores it in Cloud
// Storage, and analyzes it with the Vision API.
const express = require('express');
const { Storage } = require('@google-cloud/storage');
const axios = require('axios');
const app = express();
app.use(express.json()); // parse JSON request bodies
const storage = new Storage();
const bucketName = 'my-bucket-emotionalscore'; // target Cloud Storage bucket
const API_KEY = 'Vision APIのキー'; // Vision API key — NOTE(review): move to an env var or Secret Manager
// CORS middleware: allow any origin and answer preflight requests directly.
app.use((req, res, next) => {
  res.setHeader('Access-Control-Allow-Origin', '*');
  res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
  res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
  // Preflight (OPTIONS) requests get an empty 204 without reaching the routes.
  if (req.method !== 'OPTIONS') {
    next();
    return;
  }
  res.status(204).send('');
});
// POST /analyzeImage: accepts { image: <data URL> }, uploads the image to
// Cloud Storage, and analyzes it with the Vision API.
app.post('/analyzeImage', async (req, res) => {
  const { image } = req.body;
  if (!image) {
    return res.status(400).send({ error: 'Image data is required' });
  }
  try {
    // The upload and the Vision analysis are independent of each other,
    // so run them in parallel instead of awaiting them sequentially.
    const [uploadResponse, analysisResult] = await Promise.all([
      uploadToCloudStorage(image),
      analyzeImageWithVision(image)
    ]);
    console.log('File uploaded to Cloud Storage:', uploadResponse);
    console.log('Vision API Analysis Result:', analysisResult);
    res.status(200).send({
      success: true,
      message: 'Image analyzed successfully',
      uploadResponse,
      analysisResult
    });
  } catch (error) {
    console.error('Error processing image:', error);
    res.status(500).send({ error: 'Failed to analyze image' });
  }
});
/**
 * Uploads a base64 data-URL image to the Cloud Storage bucket as a
 * publicly readable PNG.
 * @param {string} imageData - data URL ("data:image/png;base64,...").
 * @returns {Promise<{fileName: string, publicUrl: string}>} stored object info.
 */
async function uploadToCloudStorage(imageData) {
  const fileName = `image_${Date.now()}.png`;
  const base64Body = imageData.split(',')[1];
  const imageBuffer = Buffer.from(base64Body, 'base64');
  const destination = storage.bucket(bucketName).file(fileName);
  await destination.save(imageBuffer, {
    metadata: { contentType: 'image/png' },
    public: true
  });
  return {
    fileName,
    publicUrl: `https://storage.googleapis.com/${bucketName}/${fileName}`
  };
}
/**
 * Sends the image to the Vision API requesting every supported annotation
 * type (faces capped at 30 results).
 * @param {string} imageData - data URL with a base64 payload.
 * @returns {Promise<Object>} the first (and only) Vision response entry.
 */
async function analyzeImageWithVision(imageData) {
  const visionUrl = `https://vision.googleapis.com/v1/images:annotate?key=${API_KEY}`;
  const featureTypes = [
    'LABEL_DETECTION',
    'FACE_DETECTION',
    'LANDMARK_DETECTION',
    'LOGO_DETECTION',
    'TEXT_DETECTION',
    'SAFE_SEARCH_DETECTION',
    'IMAGE_PROPERTIES',
    'WEB_DETECTION'
  ];
  const features = featureTypes.map((type) =>
    type === 'FACE_DETECTION' ? { type, maxResults: 30 } : { type }
  );
  const requestPayload = {
    requests: [
      {
        image: { content: imageData.split(',')[1] },
        features
      }
    ]
  };
  const response = await axios.post(visionUrl, requestPayload, {
    headers: { 'Content-Type': 'application/json' }
  });
  return response.data.responses[0];
}
// Start the HTTP server. Cloud Run injects the port via the PORT environment
// variable; 8080 is the local-development fallback.
const port = process.env.PORT || 8080;
app.listen(port, () => {
console.log(`Server listening on port ${port}`);
});
3.その他
・このWebアプリは、AI Hackathon with Google Cloudに参加しています。