% dct_lms_C.m
% Uploaded by: kendun0711
% Upload date: 2007-06-03
% Resource size: 32k
% File size: 3k
function [W, e, Lambda] = dct_lms_C(u, d, M, alpha, beta, gamma, verbose)
% function [W, e, Lambda] = dct_lms_C(u, d, M, alpha, beta, gamma, verbose)
%
% dct_lms_C.m - use DCT-LMS algorithm with recursions on C to estimate
%               optimum weight vectors for linear estimation
%               (written for MATLAB 4.0).
%
% Reference: Ch.10 of Haykin, _Adaptive Filter Theory_, 3rd ed., 1996
%
% Input parameters:
%       u       : vector of inputs (real scalars); column vector expected
%       d       : vector of desired outputs
%       M       : final order of predictor
%       alpha   : base step size for weight updates
%       beta    : remembering factor for sliding DCT coefficient updates
%                 NOTE(review): beta is only referenced in the commented-out
%                 sliding-DCT variants below; the active recursion never uses it.
%       gamma   : forgetting factor for estimated eigenvalue updates
%       verbose : set to 1 for interactive processing
%
% Output parameters:
%       W       : row-wise matrix of Hermitian transposed weights
%                 at each iteration (M x N)
%       e       : column vector of prediction errors at each time step (N x 1)
%       Lambda  : row-wise matrix of estimates of process eigenvalues
%                 at each iteration (M x N)
%
% Copyright (c) 1994-1999 by Paul Yee

% length of maximum number of timesteps that can be predicted
N = min(length(u),length(d));
% initialize weight matrix and associated parameters for LMS predictor;
% W has N+1 columns because the loop writes one update past each time step
W = zeros(M, N+1);
Lambda = zeros(M, N);
% DCT bin indices and orthonormalization factors (k(1) scales the DC bin)
m = [0:(M-1)]';
k = ones(M, 1); k(1) = 1 / sqrt(2);
% complex exponential bases used by the recursive (sliding) DCT update
W2M = exp(-j * pi / M);
W2M2 = exp(-j * pi / 2 / M);
W2Mm = exp(-j * pi * m / M);
W2M2m = exp(-j * pi * m / 2 / M);
F1 = W2M2m;
F2 = conj(W2M2m);
y = zeros(N, 1);
e = zeros(N, 1);
Lambda_n1 = zeros(M, 1);
% seed the two auxiliary DFT-like accumulators from the first M samples;
% u(i) must be a column vector for these matrix-vector products
n = M;
i = n-M+1:n;
C1n1 = W2M.^(m*(n-i)) * u(i);
C2n1 = W2M.^(m*(i-n+2*M-1)) * u(i);
% C1n1 = (-1).^m .* W2M2m .* C1n1;
% C2n1 = (-1).^m .* W2M2m .* C2n1;
for n = M+1:N
% C1(:, n) = F1 .* (beta * F1 .* C1n1 + u(n) * (-1).^m - beta * u(n-M) * ones(M, 1));
% C2(:, n) = F2 .* (beta * F2 .* C2n1 + u(n) * (-1).^m - beta * u(n-M) * ones(M, 1));
% C(:, n) = 1/2 * k .* (C1(:, n) + C2(:, n));
  % sliding update of the two auxiliary accumulators: add the new sample,
  % drop the sample leaving the length-M window, rotate the phase
  C1(:, n) = W2M.^m .* C1n1 + u(n) * ones(M, 1) - u(n-M) * (-1).^m;
% i = n-M+1:n;
% A1(:, n) = W2M.^(m*(n-i)) * u(i);
  C2(:, n) = W2M.^(-m) .* (C2n1 + u(n) * ones(M, 1) - u(n-M) * (-1).^m);
% A2(:, n) = W2M.^(m*(i-n+2*M-1)) * u(i);
  % combine accumulators into the DCT coefficient vector for this window
  C(:, n) = 1/2 * k .* (-1).^m .* W2M2.^m .* (C1(:, n) + C2(:, n));
% C(:, n) = 1/2 * k .* (-1).^m .* W2M2.^m .* (A1(:, n) + A2(:, n));
% i = n:-1:n-M+1;
% C(:, n) = k .* (cos(m * (i-n+M-1/2) * pi / M) * u(i));
% compare with standard DCT algorithm
% un = u(n:-1:n-M+1);
% Cn(:, n) = dct(un);
  C1n1 = C1(:, n);
  C2n1 = C2(:, n);
  % predict next sample and compute error
  y(n) = W(:, n).' * C(:, n);
  e(n) = d(n) - y(n);
  if (verbose ~= 0)
    disp(['time step ', int2str(n), ': mag. pred. err. = ', num2str(abs(e(n)))]);
  end;
  % adapt eigenvalue estimate and weight vectors, adjusting for
  % offset of M+1 in starting index for eigenvalue step size
  Lambda(:, n) = gamma * Lambda_n1 + 1 / (n-M) * (C(:, n).^2 - gamma * Lambda_n1);
  Lambda_n1 = Lambda(:, n);
  % NOTE(review): per-bin normalized LMS step; a bin with Lambda near zero
  % divides by ~0 here (Inf/NaN weights) — the usual remedy is a small
  % epsilon in the denominator, left unchanged to preserve behavior.
  W(:, n+1) = W(:, n) + alpha ./ Lambda(:, n) .* C(:, n) * e(n);
end; % for n
% discard last update to W since no more data to predict
W = W(1:M, 1:N);
end; % dct_lms