tg2sip/libtgvoip/webrtc_dsp/modules/audio_processing/audio_buffer.h

/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
#define MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_

#include <stddef.h>
#include <stdint.h>

#include <memory>
#include <vector>

#include "api/audio/audio_frame.h"
#include "common_audio/channel_buffer.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "rtc_base/gtest_prod_util.h"

namespace webrtc {

class IFChannelBuffer;
class PushSincResampler;
class SplittingFilter;

enum Band { kBand0To8kHz = 0, kBand8To16kHz = 1, kBand16To24kHz = 2 };

class AudioBuffer {
 public:
  // TODO(ajm): Switch to take ChannelLayouts.
  AudioBuffer(size_t input_num_frames,
              size_t num_input_channels,
              size_t process_num_frames,
              size_t num_process_channels,
              size_t output_num_frames);
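  //
  // Example (illustrative sketch, assuming the usual 10 ms processing chunk):
  // a buffer for 48 kHz stereo audio, processed and output at the same rate
  // and channel count, could be constructed as
  //   AudioBuffer buffer(480, 2, 480, 2, 480);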

  virtual ~AudioBuffer();

  size_t num_channels() const;
  void set_num_channels(size_t num_channels);
  size_t num_frames() const;
  size_t num_frames_per_band() const;
  size_t num_keyboard_frames() const;
  size_t num_bands() const;

  // Returns a pointer array to the full-band channels.
  // Usage:
  // channels()[channel][sample].
  // Where:
  // 0 <= channel < |num_proc_channels_|
  // 0 <= sample < |proc_num_frames_|
  int16_t* const* channels();
  const int16_t* const* channels_const() const;
  float* const* channels_f();
  const float* const* channels_const_f() const;
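  //
  // Example (illustrative; |buffer| is an AudioBuffer instance): reading the
  // first sample of channel 0 from the float view of the full-band data:
  //   float* const* chans = buffer.channels_f();
  //   float first_sample = chans[0][0];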

  // Returns a pointer array to the bands for a specific channel.
  // Usage:
  // split_bands(channel)[band][sample].
  // Where:
  // 0 <= channel < |num_proc_channels_|
  // 0 <= band < |num_bands_|
  // 0 <= sample < |num_split_frames_|
  int16_t* const* split_bands(size_t channel);
  const int16_t* const* split_bands_const(size_t channel) const;
  float* const* split_bands_f(size_t channel);
  const float* const* split_bands_const_f(size_t channel) const;
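  //
  // Example (illustrative; assumes num_frames_per_band() corresponds to
  // |num_split_frames_|): iterating over the 0-8 kHz band of channel 0:
  //   int16_t* const* bands = buffer.split_bands(0);
  //   for (size_t i = 0; i < buffer.num_frames_per_band(); ++i) {
  //     int16_t sample = bands[kBand0To8kHz][i];
  //   }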

  // Returns a pointer array to the channels for a specific band.
  // Usage:
  // split_channels(band)[channel][sample].
  // Where:
  // 0 <= band < |num_bands_|
  // 0 <= channel < |num_proc_channels_|
  // 0 <= sample < |num_split_frames_|
  int16_t* const* split_channels(Band band);
  const int16_t* const* split_channels_const(Band band) const;
  float* const* split_channels_f(Band band);
  const float* const* split_channels_const_f(Band band) const;
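  //
  // Example (illustrative; |ch| and |i| are a valid channel and sample
  // index): reading from the 8-16 kHz band across channels:
  //   const int16_t* const* mid = buffer.split_channels_const(kBand8To16kHz);
  //   int16_t sample = mid[ch][i];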

  // Returns a pointer to the ChannelBuffer that encapsulates the full-band
  // data.
  ChannelBuffer<int16_t>* data();
  const ChannelBuffer<int16_t>* data() const;
  ChannelBuffer<float>* data_f();
  const ChannelBuffer<float>* data_f() const;

  // Returns a pointer to the ChannelBuffer that encapsulates the split data.
  ChannelBuffer<int16_t>* split_data();
  const ChannelBuffer<int16_t>* split_data() const;
  ChannelBuffer<float>* split_data_f();
  const ChannelBuffer<float>* split_data_f() const;

  // Returns a pointer to the low-pass data downmixed to mono. If this data
  // isn't already available it re-calculates it.
  const int16_t* mixed_low_pass_data();
  const int16_t* low_pass_reference(int channel) const;

  const float* keyboard_data() const;

  void set_activity(AudioFrame::VADActivity activity);
  AudioFrame::VADActivity activity() const;

  // Use for int16 interleaved data.
  void DeinterleaveFrom(AudioFrame* audioFrame);
  // If |data_changed| is false, only the non-audio data members will be copied
  // to |frame|.
  void InterleaveTo(AudioFrame* frame, bool data_changed) const;

  // Use for float deinterleaved data.
  void CopyFrom(const float* const* data, const StreamConfig& stream_config);
  void CopyTo(const StreamConfig& stream_config, float* const* data);
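  //
  // Example (illustrative; |frame| is a webrtc::AudioFrame filled by the
  // caller): round-tripping int16 interleaved audio through the buffer:
  //   buffer.DeinterleaveFrom(&frame);
  //   // ... per-channel processing via channels() ...
  //   buffer.InterleaveTo(&frame, /*data_changed=*/true);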

  void CopyLowPassToReference();

  // Splits the signal into different bands.
  void SplitIntoFrequencyBands();
  // Recombines the different bands into one signal.
  void MergeFrequencyBands();
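  //
  // Example (illustrative): a typical analysis/synthesis sequence around
  // per-band processing:
  //   buffer.SplitIntoFrequencyBands();
  //   // ... operate on buffer.split_bands(channel) ...
  //   buffer.MergeFrequencyBands();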

 private:
  FRIEND_TEST_ALL_PREFIXES(AudioBufferTest,
                           SetNumChannelsSetsChannelBuffersNumChannels);
  // Called from DeinterleaveFrom() and CopyFrom().
  void InitForNewData();

  // The audio is passed into DeinterleaveFrom() or CopyFrom() with input
  // format (samples per channel and number of channels).
  const size_t input_num_frames_;
  const size_t num_input_channels_;
  // The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
  // format.
  const size_t proc_num_frames_;
  const size_t num_proc_channels_;
  // The audio is returned by InterleaveTo() and CopyTo() with output samples
  // per channel and the current number of channels. The latter can be changed
  // at any time using set_num_channels().
  const size_t output_num_frames_;
  size_t num_channels_;

  size_t num_bands_;
  size_t num_split_frames_;
  bool mixed_low_pass_valid_;
  bool reference_copied_;
  AudioFrame::VADActivity activity_;

  const float* keyboard_data_;
  std::unique_ptr<IFChannelBuffer> data_;
  std::unique_ptr<IFChannelBuffer> split_data_;
  std::unique_ptr<SplittingFilter> splitting_filter_;
  std::unique_ptr<ChannelBuffer<int16_t>> mixed_low_pass_channels_;
  std::unique_ptr<ChannelBuffer<int16_t>> low_pass_reference_channels_;
  std::unique_ptr<IFChannelBuffer> input_buffer_;
  std::unique_ptr<IFChannelBuffer> output_buffer_;
  std::unique_ptr<ChannelBuffer<float>> process_buffer_;
  std::vector<std::unique_ptr<PushSincResampler>> input_resamplers_;
  std::vector<std::unique_ptr<PushSincResampler>> output_resamplers_;
};
} // namespace webrtc
#endif // MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_