ECHO 13D SDK Examples

Explore practical examples of using the ECHO 13D SDK for voice processing, speech recognition, and voice synthesis in your applications.

Example Code Repository

All of the examples on this page are available in our GitHub repository as complete projects, with C/C++, JavaScript, and Python implementations.

Basic Usage Examples

Initializing ECHO 13D

// Basic ECHO 13D initialization
#include "echo13d.h"
#include "dragonheart.h"
#include "nesh.h"
#include <stdio.h>

int main() {
    // Initialize DragonHeart
    DragonHeart* heart = dragonheart_init(NULL);
    if (!heart) {
        printf("Failed to initialize DragonHeart\n");
        return -1;
    }
    
    // Initialize NESH (optional)
    NESH* nesh = nesh_init(NULL);
    
    // Create ECHO 13D configuration
    echo_config_t config;
    config.dragonheart = heart;
    config.nesh = nesh;
    config.voice_config = NULL;  // Use defaults
    config.flags = 0;            // No special flags
    
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(&config);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        dragonheart_shutdown(heart);
        if (nesh) nesh_shutdown(nesh);
        return -1;
    }
    
    printf("ECHO 13D initialized successfully\n");
    printf("Version: %s\n", echo_get_version());
    
    // Get system information
    echo_system_info_t info;
    echo_get_system_info(echo, &info);
    
    printf("Dimensions: %d\n", info.dimension_count);
    printf("Voice templates: %d\n", info.voice_template_count);
    printf("NESH connected: %s\n", info.nesh_connected ? "Yes" : "No");
    
    // Shutdown components
    echo_shutdown(echo);
    if (nesh) nesh_shutdown(nesh);
    dragonheart_shutdown(heart);
    
    return 0;
}

Processing Audio Input

// Process audio from microphone
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D with default configuration
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open default audio input device
    audio_device_t* input_device = open_audio_input_device(DEFAULT_DEVICE);
    if (!input_device) {
        printf("Failed to open audio input device\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Recording audio for 5 seconds...\n");
    
    // Record audio (5 seconds)
    audio_buffer_t* audio = record_audio(input_device, 5.0);
    if (!audio) {
        printf("Failed to record audio\n");
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Processing audio...\n");
    
    // Process audio (full processing includes recognition, transcription, and analysis)
    echo_result_t* result = echo_process_audio(echo, audio, PROCESS_FULL);
    if (!result) {
        printf("Failed to process audio\n");
        free_audio_buffer(audio);
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Print transcription result
    if (result->transcription) {
        printf("Transcription: %s\n", result->transcription->text);
        printf("Confidence: %.2f\n", result->transcription->confidence);
    }
    
    // Print recognition result if voice was recognized
    if (result->recognition && result->recognition->recognized) {
        printf("Voice recognized: %s\n", result->recognition->identity->name);
        printf("Recognition confidence: %.2f\n", result->recognition->confidence);
    } else {
        printf("Voice not recognized\n");
    }
    
    // Print emotion analysis if available
    if (result->analysis && result->analysis->emotion) {
        printf("Detected emotion: %s (%.2f confidence)\n", 
               get_emotion_name(result->analysis->emotion->primary_emotion),
               result->analysis->emotion->primary_confidence);
        
        printf("Emotion intensity: %.2f\n", result->analysis->emotion->intensity);
    }
    
    // Clean up resources
    free_echo_result(result);
    free_audio_buffer(audio);
    close_audio_device(input_device);
    echo_shutdown(echo);
    
    return 0;
}
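
If you only need the transcribed text, you can skip recognition and emotion analysis by requesting transcription-only processing. The sketch below mirrors the example above but passes the PROCESS_TRANSCRIPTION flag (also used in the voice conversion example near the end of this page); it is a minimal illustration rather than a complete application.

// Transcription-only processing (minimal sketch)
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D with default configuration
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open default audio input device
    audio_device_t* input_device = open_audio_input_device(DEFAULT_DEVICE);
    if (!input_device) {
        echo_shutdown(echo);
        return -1;
    }
    
    // Record a short sample (3 seconds)
    audio_buffer_t* audio = record_audio(input_device, 3.0);
    if (audio) {
        // Request transcription only; recognition and analysis are skipped
        echo_result_t* result = echo_process_audio(echo, audio, PROCESS_TRANSCRIPTION);
        if (result && result->transcription) {
            printf("Transcription: %s (%.2f confidence)\n",
                   result->transcription->text,
                   result->transcription->confidence);
        }
        if (result) free_echo_result(result);
        free_audio_buffer(audio);
    }
    
    // Clean up resources
    close_audio_device(input_device);
    echo_shutdown(echo);
    
    return 0;
}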

Voice Recognition Examples

Registering a Voice Profile

// Register a new voice profile
#include "echo13d.h"
#include <stdio.h>
#include <stdbool.h>

int main(int argc, char** argv) {
    // Check if user name is provided
    if (argc < 2) {
        printf("Usage: %s \n", argv[0]);
        return -1;
    }
    
    const char* user_name = argv[1];
    
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Create voice identity
    voice_identity_t* identity = create_voice_identity(user_name);
    
    // Open audio device
    audio_device_t* device = open_audio_input_device(DEFAULT_DEVICE);
    if (!device) {
        printf("Failed to open audio device\n");
        free_voice_identity(identity);
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Please speak for 10 seconds to register your voice...\n");
    
    // Record voice sample (10 seconds)
    audio_buffer_t* audio = record_audio(device, 10.0);
    if (!audio) {
        printf("Failed to record audio\n");
        close_audio_device(device);
        free_voice_identity(identity);
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Processing voice sample...\n");
    
    // Extract voice pattern
    voice_pattern_t* pattern = extract_voice_pattern(echo, audio);
    if (!pattern) {
        printf("Failed to extract voice pattern\n");
        free_audio_buffer(audio);
        close_audio_device(device);
        free_voice_identity(identity);
        echo_shutdown(echo);
        return -1;
    }
    
    // Extract voice signature
    voice_signature_t* signature = extract_voice_signature(echo, pattern);
    if (!signature) {
        printf("Failed to extract voice signature\n");
        free_voice_pattern(pattern);
        free_audio_buffer(audio);
        close_audio_device(device);
        free_voice_identity(identity);
        echo_shutdown(echo);
        return -1;
    }
    
    // Save to NESH
    bool success = save_voice_to_nesh(echo, identity, signature);
    
    if (success) {
        printf("Voice profile for %s registered successfully!\n", user_name);
    } else {
        printf("Failed to register voice profile\n");
    }
    
    // Clean up
    free_voice_signature(signature);
    free_voice_pattern(pattern);
    free_audio_buffer(audio);
    close_audio_device(device);
    free_voice_identity(identity);
    echo_shutdown(echo);
    
    return success ? 0 : -1;
}
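
To confirm that the new profile was stored correctly, you can record a fresh sample right away and check it with recognize_voice, the same call used in the authentication example below. A minimal sketch of that verification step:

// Verify a newly registered voice profile (minimal sketch)
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open audio device
    audio_device_t* device = open_audio_input_device(DEFAULT_DEVICE);
    if (!device) {
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Speak for 3 seconds to verify your new profile...\n");
    
    // Record a fresh sample and try to match it against stored profiles
    audio_buffer_t* audio = record_audio(device, 3.0);
    if (audio) {
        recognition_result_t* result = recognize_voice(echo, audio);
        if (result) {
            if (result->recognized) {
                printf("Matched profile: %s (%.2f confidence)\n",
                       result->identity->name, result->confidence);
            } else {
                printf("No matching profile found\n");
            }
            free_recognition_result(result);
        }
        free_audio_buffer(audio);
    }
    
    // Clean up
    close_audio_device(device);
    echo_shutdown(echo);
    
    return 0;
}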

Voice Authentication

// Authenticate user by voice
#include "echo13d.h"
#include <stdio.h>

#define AUTHENTICATION_THRESHOLD 0.85  // 85% confidence threshold

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open audio device
    audio_device_t* device = open_audio_input_device(DEFAULT_DEVICE);
    if (!device) {
        printf("Failed to open audio device\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Please say your authentication phrase...\n");
    
    // Record voice sample (3 seconds)
    audio_buffer_t* audio = record_audio(device, 3.0);
    if (!audio) {
        printf("Failed to record audio\n");
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Recognize voice
    recognition_result_t* result = recognize_voice(echo, audio);
    if (!result) {
        printf("Voice recognition failed\n");
        free_audio_buffer(audio);
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Check if voice was recognized with sufficient confidence
    if (result->recognized && result->confidence >= AUTHENTICATION_THRESHOLD) {
        printf("Authentication successful!\n");
        printf("Welcome, %s (%.2f confidence)\n", 
               result->identity->name, result->confidence);
               
        // Retrieve user information
        user_info_t* user_info = get_user_info(result->identity);
        
        // Authorize user access
        authorize_user_access(user_info);
        
        free_user_info(user_info);
    } else {
        printf("Authentication failed\n");
        if (result->recognized) {
            printf("Confidence too low: %.2f < %.2f\n", 
                   result->confidence, AUTHENTICATION_THRESHOLD);
        }
    }
    
    // Clean up resources
    free_recognition_result(result);
    free_audio_buffer(audio);
    close_audio_device(device);
    echo_shutdown(echo);
    
    return 0;
}
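
In a real application you will usually allow more than one attempt before rejecting the user. The helper below is an illustrative sketch that wraps the same record and recognize calls in a bounded retry loop; it builds on the example above (same headers and AUTHENTICATION_THRESHOLD), and the three-attempt limit is an arbitrary choice rather than an SDK requirement.

// Bounded retry around voice authentication (illustrative sketch).
// Builds on the authentication example above; MAX_ATTEMPTS is arbitrary.
#include <stdbool.h>

#define MAX_ATTEMPTS 3

bool authenticate_with_retries(ECHO13D* echo, audio_device_t* device) {
    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
        printf("Attempt %d of %d: please say your authentication phrase...\n",
               attempt, MAX_ATTEMPTS);
        
        // Record a short sample for this attempt
        audio_buffer_t* audio = record_audio(device, 3.0);
        if (!audio) continue;
        
        // Match against stored profiles
        recognition_result_t* result = recognize_voice(echo, audio);
        bool accepted = result && result->recognized &&
                        result->confidence >= AUTHENTICATION_THRESHOLD;
        
        if (result) free_recognition_result(result);
        free_audio_buffer(audio);
        
        if (accepted) return true;
    }
    return false;
}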

Speech Analysis Examples

Transcription and Sentiment Analysis

// Transcribe speech and analyze sentiment
#include "echo13d.h"
#include <stdio.h>
#include <stdint.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open audio device
    audio_device_t* device = open_audio_input_device(DEFAULT_DEVICE);
    if (!device) {
        printf("Failed to open audio device\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Please speak for 5 seconds...\n");
    
    // Record voice sample (5 seconds)
    audio_buffer_t* audio = record_audio(device, 5.0);
    if (!audio) {
        printf("Failed to record audio\n");
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Analyze speech
    speech_analysis_t* analysis = analyze_speech(echo, audio);
    if (!analysis) {
        printf("Speech analysis failed\n");
        free_audio_buffer(audio);
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Print transcription
    printf("Transcription: %s\n", analysis->text->text);
    
    // Print semantic analysis
    printf("\nSemantic Analysis:\n");
    printf("Intent: %s (%.2f confidence)\n", 
           get_intent_name(analysis->semantics->primary_intent),
           analysis->semantics->intent_confidence);
    
    // Print entities
    printf("\nEntities:\n");
    for (uint32_t i = 0; i < analysis->semantics->entity_count; i++) {
        entity_t* entity = &analysis->semantics->entities[i];
        printf("- %s (type: %s, confidence: %.2f)\n",
               entity->text, get_entity_type_name(entity->type),
               entity->confidence);
    }
    
    // Print emotion analysis
    printf("\nEmotion Analysis:\n");
    printf("Primary emotion: %s (%.2f confidence)\n", 
           get_emotion_name(analysis->emotion->primary_emotion),
           analysis->emotion->primary_confidence);
    
    if (analysis->emotion->secondary_confidence > 0.3) {
        printf("Secondary emotion: %s (%.2f confidence)\n", 
               get_emotion_name(analysis->emotion->secondary_emotion),
               analysis->emotion->secondary_confidence);
    }
    
    printf("Emotion intensity: %.2f\n", analysis->emotion->intensity);
    
    // Print sentiment scores
    printf("\nSentiment Scores:\n");
    printf("Positive: %.2f\n", analysis->emotion->detailed_metrics->sentiment.positive);
    printf("Negative: %.2f\n", analysis->emotion->detailed_metrics->sentiment.negative);
    printf("Neutral: %.2f\n", analysis->emotion->detailed_metrics->sentiment.neutral);
    
    // Free resources
    free_speech_analysis(analysis);
    free_audio_buffer(audio);
    close_audio_device(device);
    echo_shutdown(echo);
    
    return 0;
}

Language Detection

// Detect spoken language
#include "echo13d.h"
#include <stdio.h>
#include <stdint.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open audio device
    audio_device_t* device = open_audio_input_device(DEFAULT_DEVICE);
    if (!device) {
        printf("Failed to open audio device\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Please speak a few sentences in any language...\n");
    
    // Record voice sample (5 seconds)
    audio_buffer_t* audio = record_audio(device, 5.0);
    if (!audio) {
        printf("Failed to record audio\n");
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Perform language detection
    language_detection_t* detection = detect_language(echo, audio);
    if (!detection) {
        printf("Language detection failed\n");
        free_audio_buffer(audio);
        close_audio_device(device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Print primary language
    printf("Primary language: %s (%.2f confidence)\n", 
           get_language_name(detection->primary_language),
           detection->primary_confidence);
    
    // Print other detected languages if confidence is high enough
    printf("\nOther detected languages:\n");
    for (uint32_t i = 0; i < detection->other_language_count; i++) {
        if (detection->other_confidence[i] > 0.1) {
            printf("- %s (%.2f confidence)\n", 
                   get_language_name(detection->other_languages[i]),
                   detection->other_confidence[i]);
        }
    }
    
    // Free resources
    free_language_detection(detection);
    free_audio_buffer(audio);
    close_audio_device(device);
    echo_shutdown(echo);
    
    return 0;
}

Voice Synthesis Examples

Basic Voice Synthesis

// Basic text-to-speech synthesis
#include "echo13d.h"
#include <stdio.h>
#include <stdbool.h>

int main(int argc, char** argv) {
    // Check if text is provided
    if (argc < 2) {
        printf("Usage: %s \"Text to synthesize\"\n", argv[0]);
        return -1;
    }
    
    const char* input_text = argv[1];
    
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Get default voice template
    voice_template_t* voice = get_default_voice_template(echo);
    if (!voice) {
        printf("Failed to get default voice template\n");
        echo_shutdown(echo);
        return -1;
    }
    
    // Create text content
    text_content_t* content = create_text_content(input_text);
    if (!content) {
        printf("Failed to create text content\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Synthesizing speech: \"%s\"\n", input_text);
    
    // Synthesize speech with neutral emotion
    audio_buffer_t* audio = echo_synthesize_speech(echo, content, voice, NULL);
    if (!audio) {
        printf("Speech synthesis failed\n");
        free_text_content(content);
        echo_shutdown(echo);
        return -1;
    }
    
    // Open audio output device
    audio_device_t* output_device = open_audio_output_device(DEFAULT_DEVICE);
    if (!output_device) {
        printf("Failed to open audio output device\n");
        free_audio_buffer(audio);
        free_text_content(content);
        echo_shutdown(echo);
        return -1;
    }
    
    // Play synthesized speech
    bool play_success = play_audio(output_device, audio);
    if (!play_success) {
        printf("Failed to play audio\n");
    } else {
        // Wait for playback to complete
        wait_for_playback_completion(output_device);
    }
    
    // Save audio to file
    save_audio_to_file(audio, "output.wav");
    printf("Saved audio to output.wav\n");
    
    // Clean up resources
    close_audio_device(output_device);
    free_audio_buffer(audio);
    free_text_content(content);
    echo_shutdown(echo);
    
    return 0;
}
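
If you only need a file and not live playback, you can skip the output device entirely and write the synthesized buffer with save_audio_to_file. A minimal sketch (the output filename is an arbitrary choice):

// Synthesize speech to a WAV file without playback (minimal sketch)
#include "echo13d.h"
#include <stdio.h>

int main(int argc, char** argv) {
    // Check if text is provided
    if (argc < 2) {
        printf("Usage: %s \"Text to synthesize\"\n", argv[0]);
        return -1;
    }
    
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Use the default voice template and wrap the input text
    voice_template_t* voice = get_default_voice_template(echo);
    text_content_t* content = create_text_content(argv[1]);
    
    if (voice && content) {
        // NULL emotion parameters give a neutral delivery
        audio_buffer_t* audio = echo_synthesize_speech(echo, content, voice, NULL);
        if (audio) {
            save_audio_to_file(audio, "synthesized.wav");
            printf("Saved audio to synthesized.wav\n");
            free_audio_buffer(audio);
        }
    }
    
    // Clean up resources
    if (content) free_text_content(content);
    echo_shutdown(echo);
    
    return 0;
}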

Creating Custom Voice Templates

// Create and use a custom voice template
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Create voice characteristics for warm, deep voice
    voice_characteristics_t characteristics;
    characteristics.pitch_base = 0.8f;      // Lower pitch
    characteristics.pitch_range = 0.9f;     // Slightly narrower pitch range
    characteristics.speech_rate = 0.9f;     // Slightly slower speech rate
    characteristics.timbre = 0.7f;          // Warm timbre
    characteristics.breathiness = 0.2f;     // Low breathiness
    characteristics.articulation = 0.8f;    // Clear articulation
    characteristics.resonance = 0.8f;       // Higher resonance (deeper)
    characteristics.age = AGE_ADULT;
    characteristics.gender = GENDER_MASCULINE;
    
    // Create voice template
    voice_template_t* warm_voice = create_voice_template(echo, "Warm Deep Voice", &characteristics);
    if (!warm_voice) {
        printf("Failed to create voice template\n");
        echo_shutdown(echo);
        return -1;
    }
    
    // Set language to English
    set_voice_template_language(warm_voice, LANGUAGE_ENGLISH);
    
    // Create a second voice with different characteristics
    voice_characteristics_t characteristics2;
    characteristics2.pitch_base = 1.2f;     // Higher pitch
    characteristics2.pitch_range = 1.3f;    // Wider pitch range
    characteristics2.speech_rate = 1.1f;    // Slightly faster speech rate
    characteristics2.timbre = 0.4f;         // Brighter timbre
    characteristics2.breathiness = 0.4f;    // More breathiness
    characteristics2.articulation = 0.9f;   // Very clear articulation
    characteristics2.resonance = 0.4f;      // Lower resonance (lighter)
    characteristics2.age = AGE_ADULT;
    characteristics2.gender = GENDER_FEMININE;
    
    // Create voice template
    voice_template_t* bright_voice = create_voice_template(echo, "Bright Voice", &characteristics2);
    if (!bright_voice) {
        printf("Failed to create second voice template\n");
        free_voice_template(warm_voice);
        echo_shutdown(echo);
        return -1;
    }
    
    // Set language to English
    set_voice_template_language(bright_voice, LANGUAGE_ENGLISH);
    
    // Create text content
    const char* text = "This is a demonstration of different voice templates with ECHO 13D.";
    text_content_t* content = create_text_content(text);
    if (!content) {
        printf("Failed to create text content\n");
        free_voice_template(bright_voice);
        free_voice_template(warm_voice);
        echo_shutdown(echo);
        return -1;
    }
    
    // Open audio output device
    audio_device_t* output_device = open_audio_output_device(DEFAULT_DEVICE);
    if (!output_device) {
        printf("Failed to open audio output device\n");
        free_text_content(content);
        free_voice_template(bright_voice);
        free_voice_template(warm_voice);
        echo_shutdown(echo);
        return -1;
    }
    
    // Synthesize and play with warm voice
    printf("Synthesizing with warm deep voice...\n");
    audio_buffer_t* audio1 = echo_synthesize_speech(echo, content, warm_voice, NULL);
    if (audio1) {
        play_audio(output_device, audio1);
        wait_for_playback_completion(output_device);
        free_audio_buffer(audio1);
    }
    
    // Short pause between voices
    sleep_ms(1000);
    
    // Synthesize and play with bright voice
    printf("Synthesizing with bright voice...\n");
    audio_buffer_t* audio2 = echo_synthesize_speech(echo, content, bright_voice, NULL);
    if (audio2) {
        play_audio(output_device, audio2);
        wait_for_playback_completion(output_device);
        free_audio_buffer(audio2);
    }
    
    // Clean up resources
    close_audio_device(output_device);
    free_text_content(content);
    free_voice_template(bright_voice);
    free_voice_template(warm_voice);
    echo_shutdown(echo);
    
    return 0;
}

Emotional Speech Synthesis

// Synthesize speech with different emotions
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Get default voice template
    voice_template_t* voice = get_default_voice_template(echo);
    if (!voice) {
        printf("Failed to get default voice template\n");
        echo_shutdown(echo);
        return -1;
    }
    
    // Create text content
    const char* text = "This is a demonstration of emotional speech synthesis with ECHO 13D.";
    text_content_t* content = create_text_content(text);
    if (!content) {
        printf("Failed to create text content\n");
        echo_shutdown(echo);
        return -1;
    }
    
    // Open audio output device
    audio_device_t* output_device = open_audio_output_device(DEFAULT_DEVICE);
    if (!output_device) {
        printf("Failed to open audio output device\n");
        free_text_content(content);
        echo_shutdown(echo);
        return -1;
    }
    
    // Define emotion types to demonstrate
    emotion_type_t emotions[] = {
        EMOTION_NEUTRAL,
        EMOTION_HAPPY,
        EMOTION_SAD,
        EMOTION_ANGRY,
        EMOTION_SURPRISED
    };
    
    // Loop through emotions and synthesize speech
    for (int i = 0; i < sizeof(emotions) / sizeof(emotions[0]); i++) {
        // Create emotion parameters
        emotion_params_t* emotion = create_emotion_params(emotions[i]);
        if (!emotion) {
            printf("Failed to create emotion parameters\n");
            continue;
        }
        
        // Set emotion intensity to 0.8 (80%)
        emotion->intensity = 0.8f;
        
        // Print current emotion
        printf("Synthesizing with %s emotion...\n", get_emotion_name(emotions[i]));
        
        // Synthesize speech with emotion
        audio_buffer_t* audio = echo_synthesize_speech(echo, content, voice, emotion);
        if (!audio) {
            printf("Speech synthesis failed\n");
            free_emotion_params(emotion);
            continue;
        }
        
        // Play synthesized speech
        play_audio(output_device, audio);
        wait_for_playback_completion(output_device);
        
        // Free resources
        free_audio_buffer(audio);
        free_emotion_params(emotion);
        
        // Short pause between emotions
        sleep_ms(1000);
    }
    
    // Clean up resources
    close_audio_device(output_device);
    free_text_content(content);
    echo_shutdown(echo);
    
    return 0;
}
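
The intensity field can also be swept for a single emotion to hear how strongly it colors the delivery. The fragment below is a sketch that slots into the same setup as the example above (it reuses echo, content, voice, and output_device); the intensity steps are arbitrary demonstration values.

// Sweep emotion intensity for a single emotion (illustrative fragment)
float intensities[] = { 0.2f, 0.5f, 0.9f };

for (int i = 0; i < (int)(sizeof(intensities) / sizeof(intensities[0])); i++) {
    // Create happy emotion parameters at the current intensity
    emotion_params_t* emotion = create_emotion_params(EMOTION_HAPPY);
    if (!emotion) {
        continue;
    }
    emotion->intensity = intensities[i];
    
    printf("Synthesizing happy speech at %.1f intensity...\n", intensities[i]);
    
    // Synthesize and play with the scaled emotion
    audio_buffer_t* audio = echo_synthesize_speech(echo, content, voice, emotion);
    if (audio) {
        play_audio(output_device, audio);
        wait_for_playback_completion(output_device);
        free_audio_buffer(audio);
    }
    
    free_emotion_params(emotion);
    
    // Short pause between intensity levels
    sleep_ms(1000);
}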

Integration Examples

Integration with Aurora AI

// Integrate ECHO 13D with Aurora AI
#include "echo13d.h"
#include "aurora.h"
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

// Voice interface callback function
void voice_response_callback(const char* text, void* user_data) {
    ECHO13D* echo = (ECHO13D*)user_data;
    
    // Get default voice template
    voice_template_t* voice = get_default_voice_template(echo);
    if (!voice) {
        printf("Failed to get default voice template\n");
        return;
    }
    
    // Create text content
    text_content_t* content = create_text_content(text);
    if (!content) {
        printf("Failed to create text content\n");
        return;
    }
    
    // Create emotion parameters (neutral with slight happiness)
    emotion_params_t* emotion = create_emotion_params(EMOTION_HAPPY);
    if (!emotion) {
        printf("Failed to create emotion parameters\n");
        free_text_content(content);
        return;
    }
    
    // Set emotion intensity to 0.4 (40%)
    emotion->intensity = 0.4f;
    
    // Synthesize speech
    audio_buffer_t* audio = echo_synthesize_speech(echo, content, voice, emotion);
    if (!audio) {
        printf("Speech synthesis failed\n");
        free_emotion_params(emotion);
        free_text_content(content);
        return;
    }
    
    // Open audio output device
    audio_device_t* output_device = open_audio_output_device(DEFAULT_DEVICE);
    if (!output_device) {
        printf("Failed to open audio output device\n");
        free_audio_buffer(audio);
        free_emotion_params(emotion);
        free_text_content(content);
        return;
    }
    
    // Play synthesized speech
    play_audio(output_device, audio);
    wait_for_playback_completion(output_device);
    
    // Clean up resources
    close_audio_device(output_device);
    free_audio_buffer(audio);
    free_emotion_params(emotion);
    free_text_content(content);
}

int main() {
    // Initialize DragonHeart
    DragonHeart* heart = dragonheart_init(NULL);
    if (!heart) {
        printf("Failed to initialize DragonHeart\n");
        return -1;
    }
    
    // Initialize NESH
    NESH* nesh = nesh_init(NULL);
    if (!nesh) {
        printf("Failed to initialize NESH\n");
        dragonheart_shutdown(heart);
        return -1;
    }
    
    // Initialize ECHO 13D
    echo_config_t echo_config;
    echo_config.dragonheart = heart;
    echo_config.nesh = nesh;
    echo_config.voice_config = NULL;
    echo_config.flags = 0;
    
    ECHO13D* echo = echo_init(&echo_config);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        nesh_shutdown(nesh);
        dragonheart_shutdown(heart);
        return -1;
    }
    
    // Initialize Aurora
    aurora_config_t aurora_config;
    aurora_config.dragonheart = heart;
    aurora_config.nesh = nesh;
    aurora_config.mode = AURORA_MODE_CONVERSATIONAL;
    
    Aurora* aurora = aurora_init(&aurora_config);
    if (!aurora) {
        printf("Failed to initialize Aurora\n");
        echo_shutdown(echo);
        nesh_shutdown(nesh);
        dragonheart_shutdown(heart);
        return -1;
    }
    
    // Integrate ECHO 13D with Aurora
    if (!integrate_echo_with_aurora(echo, aurora)) {
        printf("Failed to integrate ECHO 13D with Aurora\n");
        aurora_shutdown(aurora);
        echo_shutdown(echo);
        nesh_shutdown(nesh);
        dragonheart_shutdown(heart);
        return -1;
    }
    
    // Set voice response callback
    if (!aurora_set_voice_response_callback(aurora, voice_response_callback, echo)) {
        printf("Failed to set voice response callback\n");
        aurora_shutdown(aurora);
        echo_shutdown(echo);
        nesh_shutdown(nesh);
        dragonheart_shutdown(heart);
        return -1;
    }
    
    printf("Aurora voice interface ready\n");
    printf("Say something to begin conversing with Aurora...\n");
    
    // Main interaction loop
    while (1) {
        // Open audio input device
        audio_device_t* input_device = open_audio_input_device(DEFAULT_DEVICE);
        if (!input_device) {
            printf("Failed to open audio input device\n");
            break;
        }
        
        // Record voice input (5 seconds)
        audio_buffer_t* audio = record_audio(input_device, 5.0);
        if (!audio) {
            printf("Failed to record audio\n");
            close_audio_device(input_device);
            break;
        }
        
        // Process audio
        echo_result_t* result = echo_process_audio(echo, audio, PROCESS_FULL);
        if (!result) {
            printf("Failed to process audio\n");
            free_audio_buffer(audio);
            close_audio_device(input_device);
            break;
        }
        
        // If transcription is available, process as conversation
        if (result->transcription && result->transcription->text) {
            printf("You: %s\n", result->transcription->text);
            
            // Process input with Aurora
            aurora_conversation_input_t input;
            input.text = result->transcription->text;
            input.confidence = result->transcription->confidence;
            
            if (result->analysis && result->analysis->emotion) {
                input.emotion = result->analysis->emotion->primary_emotion;
                input.emotion_confidence = result->analysis->emotion->primary_confidence;
            } else {
                input.emotion = EMOTION_NEUTRAL;
                input.emotion_confidence = 1.0f;
            }
            
            // Process conversation input (response will come via callback)
            aurora_process_conversation(aurora, &input);
        }
        
        // Check for an exit command before releasing the result
        bool should_exit = result->transcription &&
            (strstr(result->transcription->text, "exit") ||
             strstr(result->transcription->text, "quit"));
        
        // Free resources
        free_echo_result(result);
        free_audio_buffer(audio);
        close_audio_device(input_device);
        
        if (should_exit) {
            printf("Exiting...\n");
            break;
        }
    }
    
    // Clean up resources
    aurora_shutdown(aurora);
    echo_shutdown(echo);
    nesh_shutdown(nesh);
    dragonheart_shutdown(heart);
    
    return 0;
}

Advanced Examples

Voice Conversion

// Convert voice from one template to another
#include "echo13d.h"
#include <stdio.h>

int main() {
    // Initialize ECHO 13D
    ECHO13D* echo = echo_init(NULL);
    if (!echo) {
        printf("Failed to initialize ECHO 13D\n");
        return -1;
    }
    
    // Open audio input device
    audio_device_t* input_device = open_audio_input_device(DEFAULT_DEVICE);
    if (!input_device) {
        printf("Failed to open audio input device\n");
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Please speak for 5 seconds to record your voice...\n");
    
    // Record voice sample (5 seconds)
    audio_buffer_t* source_audio = record_audio(input_device, 5.0);
    if (!source_audio) {
        printf("Failed to record audio\n");
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Process the audio to extract speech content
    echo_result_t* result = echo_process_audio(echo, source_audio, PROCESS_TRANSCRIPTION);
    if (!result || !result->transcription) {
        printf("Failed to transcribe speech\n");
        free_audio_buffer(source_audio);
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    printf("Transcription: %s\n", result->transcription->text);
    
    // Create text content from transcription
    text_content_t* content = create_text_content(result->transcription->text);
    if (!content) {
        printf("Failed to create text content\n");
        free_echo_result(result);
        free_audio_buffer(source_audio);
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Create a deep masculine voice
    voice_characteristics_t characteristics1;
    characteristics1.pitch_base = 0.7f;      // Lower pitch
    characteristics1.pitch_range = 0.8f;     // Narrower pitch range
    characteristics1.speech_rate = 0.9f;     // Slightly slower
    characteristics1.timbre = 0.8f;          // Warm timbre
    characteristics1.breathiness = 0.1f;     // Low breathiness
    characteristics1.articulation = 0.8f;    // Clear articulation
    characteristics1.resonance = 0.9f;       // High resonance (deep)
    characteristics1.age = AGE_ADULT;
    characteristics1.gender = GENDER_MASCULINE;
    
    // Create a bright feminine voice
    voice_characteristics_t characteristics2;
    characteristics2.pitch_base = 1.3f;      // Higher pitch
    characteristics2.pitch_range = 1.4f;     // Wider pitch range
    characteristics2.speech_rate = 1.1f;     // Slightly faster
    characteristics2.timbre = 0.3f;          // Bright timbre
    characteristics2.breathiness = 0.4f;     // More breathiness
    characteristics2.articulation = 0.9f;    // Clear articulation
    characteristics2.resonance = 0.3f;       // Low resonance (lighter)
    characteristics2.age = AGE_ADULT;
    characteristics2.gender = GENDER_FEMININE;
    
    // Create voice templates
    voice_template_t* deep_voice = create_voice_template(echo, "Deep Voice", &characteristics1);
    voice_template_t* bright_voice = create_voice_template(echo, "Bright Voice", &characteristics2);
    
    if (!deep_voice || !bright_voice) {
        printf("Failed to create voice templates\n");
        if (deep_voice) free_voice_template(deep_voice);
        if (bright_voice) free_voice_template(bright_voice);
        free_text_content(content);
        free_echo_result(result);
        free_audio_buffer(source_audio);
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Open audio output device
    audio_device_t* output_device = open_audio_output_device(DEFAULT_DEVICE);
    if (!output_device) {
        printf("Failed to open audio output device\n");
        free_voice_template(deep_voice);
        free_voice_template(bright_voice);
        free_text_content(content);
        free_echo_result(result);
        free_audio_buffer(source_audio);
        close_audio_device(input_device);
        echo_shutdown(echo);
        return -1;
    }
    
    // Get emotion from original speech
    emotion_params_t* emotion = NULL;
    if (result->analysis && result->analysis->emotion) {
        emotion = create_emotion_params(result->analysis->emotion->primary_emotion);
        if (emotion) {
            emotion->intensity = result->analysis->emotion->intensity;
        }
    }
    
    // Synthesize with deep voice
    printf("Converting to deep masculine voice...\n");
    audio_buffer_t* deep_audio = echo_synthesize_speech(echo, content, deep_voice, emotion);
    if (deep_audio) {
        play_audio(output_device, deep_audio);
        wait_for_playback_completion(output_device);
        save_audio_to_file(deep_audio, "deep_voice.wav");
        free_audio_buffer(deep_audio);
    }
    
    // Short pause between voices
    sleep_ms(1000);
    
    // Synthesize with bright voice
    printf("Converting to bright feminine voice...\n");
    audio_buffer_t* bright_audio = echo_synthesize_speech(echo, content, bright_voice, emotion);
    if (bright_audio) {
        play_audio(output_device, bright_audio);
        wait_for_playback_completion(output_device);
        save_audio_to_file(bright_audio, "bright_voice.wav");
        free_audio_buffer(bright_audio);
    }
    
    // Clean up resources
    if (emotion) free_emotion_params(emotion);
    close_audio_device(output_device);
    close_audio_device(input_device);
    free_voice_template(deep_voice);
    free_voice_template(bright_voice);
    free_text_content(content);
    free_echo_result(result);
    free_audio_buffer(source_audio);
    echo_shutdown(echo);
    
    printf("Voice conversion complete. Audio saved to deep_voice.wav and bright_voice.wav\n");
    
    return 0;
}

Full Example Repository

Visit our GitHub repository for complete examples, including more advanced applications of the ECHO 13D SDK.

Next Steps