% *  This code was used in the following articles:
% *  [1] Learning 3-D Scene Structure from a Single Still Image,
% *      Ashutosh Saxena, Min Sun, Andrew Y. Ng,
% *      In ICCV workshop on 3D Representation for Recognition (3dRR-07), 2007.
% *      (best paper)
% *  [2] 3-D Reconstruction from Sparse Views using Monocular Vision,
% *      Ashutosh Saxena, Min Sun, Andrew Y. Ng,
% *      In ICCV workshop on Virtual Representations and Modeling
% *      of Large-scale environments (VRML), 2007.
% *  [3] 3-D Depth Reconstruction from a Single Still Image,
% *      Ashutosh Saxena, Sung H. Chung, Andrew Y. Ng.
% *      International Journal of Computer Vision (IJCV), Aug 2007.
% *  [6] Learning Depth from Single Monocular Images,
% *      Ashutosh Saxena, Sung H. Chung, Andrew Y. Ng.
% *      In Neural Information Processing Systems (NIPS) 18, 2005.
% *
% *  These articles are available at:
% *  http://make3d.stanford.edu/publications
% *
% *  We request that you cite the papers [1], [3] and [6] in any of
% *  your reports that uses this code.
% *  Further, if you use the code in image3dstiching/ (multiple image version),
% *  then please cite [2].
% *
% *  If you use the code in third_party/, then PLEASE CITE and follow the
% *  LICENSE OF THE CORRESPONDING THIRD PARTY CODE.
% *
% *  Finally, this code is for non-commercial use only. For further
% *  information and to obtain a copy of the license, see
% *
% *  http://make3d.stanford.edu/publications/code
% *
% *  Also, the software distributed under the License is distributed on an
% *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
% *  express or implied. See the License for the specific language governing
% *  permissions and limitations under the License.
% *
% */
function [x, fail, info] = Ash_L1_Minimizer(A, b, tol, VERBOSE)
% ASH_L1_MINIMIZER  Solve x = argmin_x || A*x - b ||_1.
%
%   [x, fail, info] = Ash_L1_Minimizer(A, b, tol, VERBOSE)
%
%   x \in Re^{Nx1}, A \in Re^{MxN}, b \in Re^{Mx1}
%
% Uses a sigmoidal (smooth) approximation to the L1 objective, minimized
% with Newton steps and a backtracking line search, while the sharpness
% parameter alpha is gradually increased to tighten the approximation.
%
%   tol     : stopping tolerance on the Newton decrement (default 1e-12)
%   VERBOSE : print per-iteration diagnostics (default false)
%   fail    : 1 if the inputs are invalid or optimization fails
%   info    : struct with fields numIter, newtonDecrement, alpha,
%             IllConditionStop
%
% Optimization parameters to be tweaked inside the function are:
%   ALPHA_MAX, alpha_init, mu, MIN_ALPHA, MAX_ITER (and t0, beta in the
%   line search).

fail = 0;

% ---- input validation --------------------------------------------------
if nargin < 2
    disp('Type help Ash_L1_Minimizer');
    fail = 1;
    x = [];        % FIX: outputs must be assigned before an early return
    info = [];
    return;
end
if size(A,1) ~= size(b,1)
    disp('Vectors of different size. Type help Ash_L1_Minimizer');
    fail = 1;
    x = [];        % FIX: outputs must be assigned before an early return
    info = [];
    return;
end
if size(A,1) < size(A,2)
    disp('Multiple solutions possible');
end

if nargin < 3
    tol = 1e-12;   % should be tweaked.
end
if nargin < 4
    VERBOSE = false;
end

% ---- magic numbers -----------------------------------------------------
alpha_init = 1;       % initial sharpness of the sigmoidal approximation
mu         = 1.01;    % per-iteration growth factor for alpha
ALPHA_MAX  = 50000;   % hard cap on alpha
MIN_ALPHA  = 5000;    % iterate only while alpha < MIN_ALPHA
MAX_ITER   = 1000;    % iteration cap
x_init = zeros(size(A,2),1);

% ---- initialize loop ---------------------------------------------------
alpha = alpha_init;
x = x_init;
info.numIter = 0;
IllConditionStop = false;   % FIX: must be defined even if the loop body never runs

[obj, g, H, actual] = f_Hessian_evaluate(A, b, x, alpha);
d = - H \ g;                        % the Newton direction
info.newtonDecrement = d'*H*d;      % could also be g'*d

% ---- Newton iteration --------------------------------------------------
while info.newtonDecrement > tol && info.numIter < MAX_ITER && alpha < MIN_ALPHA
    t = find_t_byBacktracking(d, g, A, b, x, alpha);
    x = x + t * d;
    [obj, g, H, actual, IllConditionStop] = f_Hessian_evaluate(A, b, x, alpha);
    if IllConditionStop
        disp('Bad conditioning of Hessian....');
        break;
    end
    d = - H \ g;    % Newton step; probably will be faster, if banded
    alpha = min(alpha*mu, ALPHA_MAX);   % make the approximation more tight

    info.newtonDecrement = d'*H*d;      % could also be g'*d
    if VERBOSE
        disp(['Newton decrement. lambda^2 = d^THd = ' num2str(info.newtonDecrement)]);
        disp(['Iteration number: ' num2str(info.numIter)]);   % FIX: was bare "numIter" (undefined)
        disp(['Alpha: ' num2str(alpha)]);
        disp(['Approximate ||Ax-b||_1 value = ' num2str(obj) ', Exact value = ' num2str(actual)]);
    end
    info.numIter = info.numIter + 1;
end

if info.numIter >= MAX_ITER && info.newtonDecrement > tol
    disp('Error: Maximum Iterations reached or newtonDecrement is high or NaN somewhere.');
    fail = 1;
end

info.alpha = alpha;
info.IllConditionStop = IllConditionStop;

return;
%============ Function that evaluates the Hessian and the gradient ========
function [obj, g, H, actual, stop] = f_Hessian_evaluate(A, b, x, alpha)
% Evaluate the smooth objective, its gradient g and Hessian H at x for
% the sigmoidal approximation of ||Ax-b||_1 with sharpness alpha.
%   obj    : approximate (smooth) objective value
%   actual : exact ||Ax-b||_1
%   stop   : true when any Hessian weight underflows to zero, i.e. the
%            Hessian is (numerically) ill-conditioned and iteration
%            should stop

% objective (approximate and exact)
[obj, actual] = f_obj_evaluate(A, b, x, alpha);

logExpTerm = alpha*(A*x - b);
expTerm    = exp(logExpTerm);
expTermNeg = exp(-logExpTerm);

% Hessian weights n_i = e^{z_i}/(1+e^{z_i})^2 (the sigmoid derivative),
% computed in log space to delay underflow.
n = exp( logExpTerm - 2*log(1+expTerm) );
stop = any(n == 0);   % same truth value as the old length(find(...))/length(n) > 0 test
H = 2*alpha * A' * diag(n) * A;

% gradient weights m_i = sigma(z_i) - sigma(-z_i); g = A'*m.
% FIX: A'*diag(m)*ones(M,1) built an MxM diagonal matrix only to compute
% the identical vector A'*m.
m = 1./(1+expTermNeg) - 1./(1+expTerm);
g = A' * m;

return;
%====== Function that just evaluates ||Ax-b||_1 ========
function [obj, actual] = f_obj_evaluate(A, b, x, alpha)
% Evaluate both the smooth surrogate and the exact L1 objective at x.
%   obj    : (1/alpha) * sum_i [ log(1+e^{-z_i}) + log(1+e^{z_i}) ],
%            with z = alpha*(A*x-b); approaches ||Ax-b||_1 as alpha grows
%   actual : exact ||Ax-b||_1

z = alpha*(A*x - b);

% Numerically stable identity:
%   log(1+e^{-z}) + log(1+e^{z}) = |z| + 2*log(1 + e^{-|z|})
% FIX: the naive exp(z) overflows to Inf for |z| greater than ~709,
% which poisoned obj (and the line search); this form never overflows.
obj = (1/alpha) * sum( abs(z) + 2*log1p(exp(-abs(z))) );

% actual value of ||Ax-b||_1
actual = norm(A*x - b, 1);

return;
% =========== Function to calculate step size t by back-tracking line search ====
function [t] = find_t_byBacktracking(d, g, A, b, x, alpha, beta, t0)
% Backtracking line search for the Newton step.
% Refer Convex Optimization, Boyd page 464.
%   d     : search direction,  g : gradient at x
%   beta  : step shrink factor (default 0.5)
%   t0    : initial step (default 1; can be made faster here)

% FIX: set the defaults independently — the old single "if nargin < 7"
% left t0 undefined when beta was passed but t0 was not.
if nargin < 7
    beta = 0.5;
end
if nargin < 8
    t0 = 1;
end

% FIX: sufficient-decrease constant (Boyd uses a value in (0, 0.5)).
% With an implicit constant of 1, convexity guarantees
% f(x+t*d) >= f(x) + t*g'*d, so the loop shrank t down to roundoff.
c = 0.01;
MAX_HALVINGS = 60;   % hard cap guarantees termination even if g'*d >= 0

t = t0;
f_x = f_obj_evaluate(A, b, x, alpha);
gd = g'*d;

iter = 0;
while f_obj_evaluate(A, b, x + t*d, alpha) > f_x + c*t*gd && iter < MAX_HALVINGS
    t = t*beta;
    iter = iter + 1;
end

return;