% *  This code was used in the following articles:
% *  [1] Learning 3-D Scene Structure from a Single Still Image,
% *      Ashutosh Saxena, Min Sun, Andrew Y. Ng,
% *      In ICCV workshop on 3D Representation for Recognition (3dRR-07), 2007.
% *      (best paper)
% *  [2] 3-D Reconstruction from Sparse Views using Monocular Vision,
% *      Ashutosh Saxena, Min Sun, Andrew Y. Ng,
% *      In ICCV workshop on Virtual Representations and Modeling
% *      of Large-scale environments (VRML), 2007.
% *  [3] 3-D Depth Reconstruction from a Single Still Image,
% *      Ashutosh Saxena, Sung H. Chung, Andrew Y. Ng.
% *      International Journal of Computer Vision (IJCV), Aug 2007.
% *  [6] Learning Depth from Single Monocular Images,
% *      Ashutosh Saxena, Sung H. Chung, Andrew Y. Ng.
% *      In Neural Information Processing Systems (NIPS) 18, 2005.
% *
% *  These articles are available at:
% *  http://make3d.stanford.edu/publications
% *
% *  We request that you cite the papers [1], [3] and [6] in any of
% *  your reports that use this code.
% *  Further, if you use the code in image3dstiching/ (multiple image version),
% *  then please cite [2].
% *
% *  If you use the code in third_party/, then PLEASE CITE and follow the
% *  LICENSE OF THE CORRESPONDING THIRD PARTY CODE.
% *
% *  Finally, this code is for non-commercial use only.  For further
% *  information and to obtain a copy of the license, see
% *
% *  http://make3d.stanford.edu/publications/code
% *
% *  Also, the software distributed under the License is distributed on an
% *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
% *  express or implied.  See the License for the specific language governing
% *  permissions and limitations under the License.
% *
function [x, fail, info] = Ash_L1_Minimizer(A, b, tol, VERBOSE)
% [x, fail, info] = Ash_L1_Minimizer(A, b, tol, VERBOSE)
% Solves:      x = minimize_x || Ax - b ||_1
%              x \in Re^{Nx1},   A \in Re^{MxN},   b \in Re^{Mx1}
% Uses a sigmoidal (smooth) approximation of the L1 objective and its gradient.
% fail:     returns 1 if the optimization fails.
% info:     detailed results of the optimization
% tol:      tolerance on the Newton decrement
% VERBOSE:  false by default
%
% Optimization parameters to be tweaked inside the function are:
% ALPHA_MAX, alpha_init, mu, t0, beta
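%
% Example (hypothetical data; a minimal sketch of the call):
%   A = randn(100, 5);  b = randn(100, 1);
%   [x, fail, info] = Ash_L1_Minimizer(A, b, 1e-10, true);
%   if ~fail, disp(norm(A*x - b, 1)); end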

fail = 0;
x = [];                 % defined so that the early error returns are well-formed
info = [];

if nargin < 2
    disp('Type help Ash_L1_Minimizer');
    fail = 1;
    return;
end
if size(A,1) ~= size(b,1)
    disp('A and b have different numbers of rows. Type help Ash_L1_Minimizer');
    fail = 1;
    return;
end
if size(A,1) < size(A,2)
    disp('Underdetermined system: multiple solutions possible');
end

if nargin < 3
    tol = 1e-12;        % should be tweaked.
end
if nargin < 4
    VERBOSE = false;
end

%=== Magic Numbers
alpha_init = 1;         % initial sharpness of the sigmoidal approximation
mu = 1.01;              % multiplicative increase of alpha per iteration
ALPHA_MAX = 50000;      % cap on alpha
MIN_ALPHA = 5000;       % iteration stops once alpha reaches this value
MAX_ITER = 1000;
x_init = zeros(size(A,2),1);

%initialize loop
alpha = alpha_init;
x = x_init;
info.numIter = 0;
IllConditionStop = false;   % defined here in case the loop below exits immediately

[obj, g, H, actual] = f_Hessian_evaluate(A,b,x,alpha);
d = - H \ g;    % the Newton direction
info.newtonDecrement = d'*H*d;      % lambda^2; equals -g'*d
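% The Newton decrement lambda^2 = g'*inv(H)*g = d'*H*d estimates the
% suboptimality of the smooth problem (f(x) - min f is roughly lambda^2/2
% near the optimum), which is why it serves as the stopping criterion below.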

% start iteration loop
while info.newtonDecrement > tol && info.numIter < MAX_ITER && alpha < MIN_ALPHA
    t = find_t_byBacktracking(d, g, A, b, x, alpha);
    x = x + t * d;
    [obj, g, H, actual, IllConditionStop] = f_Hessian_evaluate(A,b,x,alpha);
    if IllConditionStop
        disp('Bad conditioning of Hessian....');
        break;
    end
    d = - H \ g;        % Newton step; could be faster if H were banded
    alpha = min(alpha*mu, ALPHA_MAX);   % make the approximation tighter

    info.newtonDecrement = d'*H*d;
    if VERBOSE
        disp(['Newton decrement: lambda^2 = d^T H d = ' num2str(info.newtonDecrement)]);
        disp(['Iteration number: ' num2str(info.numIter)]);
        disp(['Alpha: ' num2str(alpha)]);
        disp(['Approximate ||Ax-b||_1 value = ' num2str(obj) ',    Exact value = ' num2str(actual)]);
    end
    info.numIter = info.numIter + 1;
end

if info.numIter >= MAX_ITER && info.newtonDecrement > tol
    disp('Error: maximum iterations reached while the Newton decrement is still above tol (or is NaN).');
    fail = 1;
end

info.alpha = alpha;
info.IllConditionStop = IllConditionStop;

return;


%============ Function that evaluates the Hessian and the gradient ========
function [obj, g, H, actual, stop] = f_Hessian_evaluate(A,b,x,alpha)
% alpha controls the sharpness of the sigmoidal approximation

% objective
[obj, actual] = f_obj_evaluate(A, b, x, alpha);

logExpTerm = alpha*(A*x-b);
expTerm = exp(logExpTerm);
expTermNeg = exp(-logExpTerm);

% evaluating the Hessian
n = exp( logExpTerm - 2*log(1+expTerm) );   % sigma(u).*(1-sigma(u)) for u = alpha*(A*x-b)
stop = any(n == 0);                         % some entries underflowed: Hessian is unreliable
H = 2*alpha* A' * diag(n) * A;

% evaluating the gradient
m = 1./(1+expTermNeg) - 1./(1+expTerm);     % tanh(u/2), a smooth stand-in for sign(u)
g = A'*m;

return;
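
% Derivation sketch: with u = alpha*(A*x - b) and sigma(u) = 1./(1+exp(-u)),
% the smooth objective used by f_obj_evaluate is
%   f(x) = (1/alpha) * sum( log(1+exp(-u)) + log(1+exp(u)) ),
% whose gradient and Hessian are exactly the quantities formed above:
%   grad f = A' * m,                  m = sigma(u) - sigma(-u) = tanh(u/2)
%   Hess f = 2*alpha * A'*diag(n)*A,  n = sigma(u).*(1-sigma(u))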


%====== Function that just evaluates ||Ax-b||_1 ========
function [obj, actual] = f_obj_evaluate(A, b, x, alpha)

logExpTerm = alpha*(A*x-b);
expTerm = exp(logExpTerm);
expTermNeg = exp(-logExpTerm);

% Approximate (smooth) objective value; approaches ||A*x-b||_1 as alpha grows
obj = (1/alpha) * sum( log(1+expTermNeg) + log(1+expTerm) );

% actual value of ||Ax-b||_1
actual = norm(A*x-b,1);

return;
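
% Quick numerical check of the smooth approximation (a sketch to run at the
% prompt; values and sizes are arbitrary):
%   r = linspace(-2, 2, 5)';
%   for alpha = [1 10 100]
%       smooth = (1/alpha) * (log(1+exp(-alpha*r)) + log(1+exp(alpha*r)));
%       disp([abs(r) smooth]);   % second column approaches the first
%   end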


% =========== Function to calculate step size t by back-tracking line search ====
function [t] = find_t_byBacktracking(d, g, A, b, x, alpha, beta, t0)

if nargin < 7
    beta = 0.5;
end
if nargin < 8
    t0 = 1;         % can be made faster here
end

% Sufficient-decrease constant, which must lie in (0, 0.5); without it the
% test f(x+t*d) <= f_x + t*g'*d cannot be met for a strictly convex f.
ALPHA_LS = 0.01;

t = t0;
f_x = f_obj_evaluate(A,b,x,alpha);
gd = g'*d;          % directional derivative; negative along a descent direction

% Refer Convex Optimization, Boyd, page 464
while f_obj_evaluate(A,b,x+t*d,alpha) > f_x + ALPHA_LS*t*gd
    t = t*beta;
end

return;
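
% Note: beta shrinks the step until the sufficient-decrease condition holds;
% Boyd & Vandenberghe (Sec. 9.2) show this terminates for any constant in
% (0, 0.5) when the objective is smooth and d is a descent direction.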