% Check that the fancy logsumexp2 routine agrees with the obvious Matlab
% computation log(sum(exp(A))) — in regimes where numerics are benign —
% across a collection of awkward ND-array shapes (incl. empty/singleton dims).
% (This primarily exercises ndhelper.m.)
shapes = {[1 3], [5 1], [10 3], [0], [0 9], [3 0], [3 1 4], [1 2 3], [1 1 1 4], [1 1 0 5]};
for idx = 1:numel(shapes)
    sz = shapes{idx};
    A = rand(sz);
    % Default-dimension form first:
    testclose(logsumexp2(A), log(sum(exp(A))));
    % Then every explicit dimension for this shape:
    for dim = 1:length(sz)
        if ~testclose(logsumexp2(A, dim), log(sum(exp(A), dim)))
            % On mismatch, dump the shape and result sizes, then drop into
            % the debugger for interactive inspection.
            sz
            size(logsumexp2(A, dim))
            %size(ls_logsumexp(A, dim)) % LS version crashes!
            size(log(sum(exp(A), dim)))
            keyboard
        end
    end
end

% Check behaviour under horrible numerics (huge magnitudes, Infs, NaNs) by
% comparing against lightspeed's LOGSUMEXP on growing prefixes of A.
A = [-Inf -Inf -3000 -3000 -Inf -600 -599 -10 5 1000 999 Inf Inf 1000 Inf NaN 5 2]';
lcs = zeros(size(A));
lcs2 = zeros(size(A));
for k = 1:numel(A)
    lcs(k) = logsumexp2(A(1:k));
    lcs2(k) = ls_logsumexp(A(1:k));
end
% Tom Minka's logsumexp fails (or at least used to fail) to handle NaNs
% correctly when Infs are also present in the array; patch that up here so
% the comparison below is meaningful:
if ~all(isnan(lcs2(end-2:end)))
    fprintf('NOTE: lightspeed didn''t produce NaN''s that I think it should have so I ''fixed'' them\n');
    lcs2(end-2:end) = NaN;
end
testclose(lcs, lcs2);
fprintf('A warning (but not failure) on the previous line about NaN''s was expected.\n');

% Timing comparison borrowed from lightspeed. In my runs the new version is
% somewhat faster, though the margin depends on the input size.
x = rand(1000,1)*1000; % wide spread of inputs, where the speed benefit shows
tic; for iter = 1:1000 logsumexp2(x); end; toc
tic; for iter = 1:1000 ls_logsumexp(x); end; toc

test_exit