diff --git a/scenarios/README.md b/scenarios/README.md
index 8870f27be..d46d4bb21 100644
--- a/scenarios/README.md
+++ b/scenarios/README.md
@@ -71,17 +71,20 @@ This sample application transcribes an audio recording using Cognitive Services
-* `--speechKey KEY`: Your Cognitive Services or Speech resource key. Required.
-* `--speechRegion REGION`: Your Cognitive Services or Speech resource region. Examples: `eastus`, `northeurope` Required.
+`Usage: post-call-analytics <inputAudio> [options]`
-* `--openAiKey KEY`: Your Azure OpenAI resource key. Required.
-* `--openAiEndpoint ENDPOINT`: Your Azure OpenAI resource endpoint. Example: `https://YourResourceName.openai.azure.com` Required.
-* `--openAiDeploymentName OPENAIDEPLOYMENTNAME`: Your Azure OpenAI deployment name. Example: my-gpt-4o-mini Required.
-
-* `--inputAudio FILEPATH`: File path to audio. Required.
+
+Arguments:
+* `<inputAudio>`: Path to the audio file. Required.
+
+Options:
+* `--speechKey KEY`: Your Cognitive Services or Speech resource key. The value can also be set via the `SPEECH_KEY` environment variable. Required.
+* `--speechRegion REGION`: Your Cognitive Services or Speech resource region. The value can also be set via the `SPEECH_REGION` environment variable. Examples: `eastus`, `northeurope`. Required.
+* `--openAiKey KEY`: Your Azure OpenAI resource key. The value can also be set via the `AOAI_KEY` environment variable. Optional.
+* `--openAiEndpoint ENDPOINT`: Your Azure OpenAI resource endpoint. Example: `https://YourResourceName.openai.azure.com`. The value can also be set via the `AOAI_ENDPOINT` environment variable. Optional.
+* `--openAiDeploymentName OPENAIDEPLOYMENTNAME`: Your Azure OpenAI deployment name. Example: `my-gpt-4o-mini`. The value can also be set via the `AOAI_DEPLOYMENT_NAME` environment variable. Optional.
* `--help`: Show the usage help and stop
+
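+For example, with a hypothetical recording `call.wav`, and with the Azure OpenAI values supplied through the `AOAI_KEY`, `AOAI_ENDPOINT`, and `AOAI_DEPLOYMENT_NAME` environment variables, an invocation could look like:
+
+`post-call-analytics call.wav --speechKey YourResourceKey --speechRegion eastus`
+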
## Call Center Transcription and Analytics
Visit the [call center transcription quickstart](https://learn.microsoft.com/azure/cognitive-services/speech-service/call-center-quickstart) for a detailed guide on how to get started transcribing call recordings using the Speech and Language Services.
diff --git a/scenarios/csharp/dotnetcore/post-call-analytics/Program.cs b/scenarios/csharp/dotnetcore/post-call-analytics/Program.cs
index 510f6bff3..48fdb4bf0 100644
--- a/scenarios/csharp/dotnetcore/post-call-analytics/Program.cs
+++ b/scenarios/csharp/dotnetcore/post-call-analytics/Program.cs
@@ -74,27 +74,46 @@ internal static async Task<string> SummarizeAsync(string openAiKey, string openA
internal static async Task AnalyzeAudioAsync(string speechKey, string speechRegion, FileInfo inputAudio, string openAiKey, string openAiEndpoint, string deploymentOrModelName)
{
- if (string.IsNullOrEmpty(speechKey) || string.IsNullOrEmpty(speechRegion) || (inputAudio == null || !inputAudio.Exists) || string.IsNullOrEmpty(openAiKey) || string.IsNullOrEmpty(openAiEndpoint) || string.IsNullOrEmpty(deploymentOrModelName))
- {
- Console.WriteLine("Error: missing required option");
- return;
- }
-
var transcription = await TranscribeAsync(speechKey, speechRegion, inputAudio);
Console.WriteLine($"Transcription: {transcription}");
- var summary = await SummarizeAsync(openAiKey, openAiEndpoint, deploymentOrModelName, transcription);
- Console.WriteLine($"Summary: {summary}");
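+            // Summarization is optional: run it only when all of the Azure OpenAI settings are available.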
+ if (!string.IsNullOrEmpty(openAiKey) && !string.IsNullOrEmpty(openAiEndpoint) && !string.IsNullOrEmpty(deploymentOrModelName))
+ {
+ var summary = await SummarizeAsync(openAiKey, openAiEndpoint, deploymentOrModelName, transcription);
+ Console.WriteLine($"Summary: {summary}");
+ }
+ else
+ {
+                Console.WriteLine("Missing AOAI configuration. Skipping summarization.");
+ }
}
public async static Task Main(string[] args)
{
-            var inputAudio = new Option<FileInfo>(name: "--inputAudio", description: "Path to the audio file. Required.");
-            var speechKey = new Option<string>(name: "--speechKey", description: "Your Cognitive Services or Speech resource key. Required.");
-            var speechRegion = new Option<string>(name: "--speechRegion", description: "Your Cognitive Services or Speech resource region. Example: eastus, northeurope. Required.");
-            var openAiKey = new Option<string>(name: "--openAiKey", description: "Your Azure OpenAI resource key. Required.");
-            var openAiEndpoint = new Option<string>(name: "--openAiEndpoint", description: "Your Azure OpenAI resource endpoint. Required. Example: https://YourResourceName.openai.azure.com");
-            var openAiDeploymentName = new Option<string>(name: "--openAiDeploymentName", description: "Your Azure OpenAI deployment name. Example: my-gpt-4o-mini. Required.");
+            var inputAudio = new Argument<FileInfo>(name: "inputAudio", description: "Path to the audio file. Required.");
+
+ // Speech service is used for transcription.
+            var speechKey = new Option<string>(name: "--speechKey", description: "Your Cognitive Services or Speech resource key. Required.", getDefaultValue: () => Environment.GetEnvironmentVariable("SPEECH_KEY"));
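+            // Validate up front so a missing key fails with a clear error before any service call is made.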
+ speechKey.AddValidator(result =>
+ {
+ if (string.IsNullOrEmpty(result.GetValueForOption(speechKey)))
+ {
+ result.ErrorMessage = $"Speech key is required. Set via --{speechKey.Name} or SPEECH_KEY environment variable.";
+ }
+ });
+            var speechRegion = new Option<string>(name: "--speechRegion", description: "Your Cognitive Services or Speech resource region. Example: eastus, northeurope. Required.", getDefaultValue: () => Environment.GetEnvironmentVariable("SPEECH_REGION"));
+ speechRegion.AddValidator(result =>
+ {
+ if (string.IsNullOrEmpty(result.GetValueForOption(speechRegion)))
+ {
+ result.ErrorMessage = $"Speech region is required. Set via --{speechRegion.Name} or SPEECH_REGION environment variable.";
+ }
+ });
+
+ // AOAI is used for summarization. This step is optional.
+            var openAiKey = new Option<string>(name: "--openAiKey", description: "Your Azure OpenAI resource key. Optional.", getDefaultValue: () => Environment.GetEnvironmentVariable("AOAI_KEY"));
+            var openAiEndpoint = new Option<string>(name: "--openAiEndpoint", description: "Your Azure OpenAI resource endpoint. Optional. Example: https://YourResourceName.openai.azure.com", getDefaultValue: () => Environment.GetEnvironmentVariable("AOAI_ENDPOINT"));
+            var openAiDeploymentName = new Option<string>(name: "--openAiDeploymentName", description: "Your Azure OpenAI deployment name. Example: my-gpt-4o-mini. Optional.", getDefaultValue: () => Environment.GetEnvironmentVariable("AOAI_DEPLOYMENT_NAME"));
var rootCommand = new RootCommand()
{
diff --git a/scenarios/csharp/dotnetcore/post-call-analytics/README.md b/scenarios/csharp/dotnetcore/post-call-analytics/README.md
index 1b5473e29..15ef05483 100644
--- a/scenarios/csharp/dotnetcore/post-call-analytics/README.md
+++ b/scenarios/csharp/dotnetcore/post-call-analytics/README.md
@@ -5,15 +5,17 @@ This sample application transcribes an audio recording using Cognitive Services
-* `--speechKey KEY`: Your Cognitive Services or Speech resource key. Required.
-* `--speechRegion REGION`: Your Cognitive Services or Speech resource region. Examples: `eastus`, `northeurope` Required.
+`Usage: post-call-analytics <inputAudio> [options]`
-* `--openAiKey KEY`: Your Azure OpenAI resource key. Required.
-* `--openAiEndpoint ENDPOINT`: Your Azure OpenAI resource endpoint. Example: `https://YourResourceName.openai.azure.com` Required.
-* `--openAiDeploymentName OPENAIDEPLOYMENTNAME`: Your Azure OpenAI deployment name. Example: my-gpt-4o-mini Required.
-
-* `--inputAudio FILEPATH`: File path to audio. Required.
+
+Arguments:
+* `<inputAudio>`: Path to the audio file. Required.
+
+Options:
+* `--speechKey KEY`: Your Cognitive Services or Speech resource key. The value can also be set via the `SPEECH_KEY` environment variable. Required.
+* `--speechRegion REGION`: Your Cognitive Services or Speech resource region. The value can also be set via the `SPEECH_REGION` environment variable. Examples: `eastus`, `northeurope`. Required.
+* `--openAiKey KEY`: Your Azure OpenAI resource key. The value can also be set via the `AOAI_KEY` environment variable. Optional.
+* `--openAiEndpoint ENDPOINT`: Your Azure OpenAI resource endpoint. Example: `https://YourResourceName.openai.azure.com`. The value can also be set via the `AOAI_ENDPOINT` environment variable. Optional.
+* `--openAiDeploymentName OPENAIDEPLOYMENTNAME`: Your Azure OpenAI deployment name. Example: `my-gpt-4o-mini`. The value can also be set via the `AOAI_DEPLOYMENT_NAME` environment variable. Optional.
* `--help`: Show the usage help and stop
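+
+For example, with a hypothetical recording `call.wav`, and with the Azure OpenAI values supplied through the `AOAI_KEY`, `AOAI_ENDPOINT`, and `AOAI_DEPLOYMENT_NAME` environment variables, an invocation could look like:
+
+`post-call-analytics call.wav --speechKey YourResourceKey --speechRegion eastus`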