import pyaudio
import wave
import os
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from google.cloud import storage
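# Recording parameters: 16-bit mono PCM at 44.1 kHz, captured in
# 1024-frame buffers for 10 seconds.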
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 10
WAVE_OUTPUT_FILENAME = "output.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
print("* recording..................")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
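# Write the captured frames out as a standard WAV file.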
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
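# Point the Google Cloud client libraries at the service-account key.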
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'C:/Users/Dave/Desktop/mizu/Project Mizu-7e2ecd8c5804.json'
# These must agree with the gcs_uri below: the "gcspeechstorage" bucket
# and the "output.wav" object; source_file_name is the local recording.
bucket_name = "gcspeechstorage"
source_file_name = "C:/Users/Dave/Desktop/mizu/output.wav"
destination_blob_name = "output.wav"
gcs_uri = "gs://gcspeechstorage/output.wav"
def create_bucket(bucket_name):
    """Creates a new bucket (run once if the bucket does not exist yet)."""
    storage_client = storage.Client()
    bucket = storage_client.create_bucket(bucket_name)
    print('Bucket {} created'.format(bucket.name))
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Uploads a file to the bucket."""
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(destination_blob_name)
    blob.upload_from_filename(source_file_name)
    print('File {} uploaded to {}.'.format(
        source_file_name,
        destination_blob_name))
# [START speech_transcribe_async_gcs]
def transcribe_gcs(gcs_uri):
    """Asynchronously transcribes the audio file specified by the gcs_uri."""
    # google-cloud-speech >= 2.0: RecognitionAudio and RecognitionConfig
    # live directly on the speech module (the old enums/types modules are gone).
    from google.cloud import speech
    client = speech.SpeechClient()
    audio = speech.RecognitionAudio(uri=gcs_uri)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code='en-US')
    operation = client.long_running_recognize(config=config, audio=audio)
    print('Waiting for operation to complete...')
    response = operation.result(timeout=90)
    # Each result is for a consecutive portion of the audio. Iterate through
    # them to get the transcripts for the entire audio file.
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        print(u'Transcript: {}'.format(result.alternatives[0].transcript))
        # Append the transcript to a text file for the sentiment pass below.
        with open('speechToAnalyze.txt', 'a') as transcribed_speech_file:
            transcribed_speech_file.write(result.alternatives[0].transcript + '\n')
        print('Confidence: {}'.format(result.alternatives[0].confidence))
# [END speech_transcribe_async_gcs]
if __name__ == '__main__':
    # Upload the fresh recording before asking Speech-to-Text to read it
    # from the bucket; otherwise a stale copy of output.wav would be used.
    upload_blob(bucket_name, source_file_name, destination_blob_name)
    transcribe_gcs(gcs_uri)
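    # Fetch the VADER lexicon on first run; nltk.download is a no-op once
    # it is installed.
    nltk.download('vader_lexicon', quiet=True)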
    # Score each transcribed line with VADER and print its polarity scores.
    with open('speechToAnalyze.txt', 'r') as audio_rec:
        sid = SentimentIntensityAnalyzer()
        for sentence in audio_rec:
            ss = sid.polarity_scores(sentence)
            for k in ss:
                print('{0}: {1}, '.format(k, ss[k]), end='')
            print()