#ifndef O2SCL_ANNEAL_PARA_H
#define O2SCL_ANNEAL_PARA_H

#include <cmath>
#include <ctime>
#include <iostream>
#include <vector>

#include <mpi.h>
#include <omp.h>

#include <o2scl/anneal_gsl.h>
#include <o2scl/vector.h>
#include <o2scl/multi_funct.h>

#ifndef DOXYGEN_NO_O2NS
namespace o2scl {
#endif

  /** \brief Multidimensional minimization by simulated annealing
      (OpenMP/MPI version)
  */
  template<class func_t=multi_funct,
           class vec_t=boost::numeric::ublas::vector<double> >
  class anneal_para : public anneal_gsl<func_t,vec_t> {

  public:

    typedef boost::numeric::ublas::vector<double> ubvector;

    size_t n_threads;        ///< The number of OpenMP threads
    double start_time;       ///< The MPI starting time
    double max_time;         ///< The maximum time (no limit if zero)
    bool collect_all_ranks;  ///< Obtain the global minimum over all MPI ranks

    anneal_para() {
      collect_all_ranks=true;
    }

    /// Vector of random number generators, one for each OpenMP thread
    std::vector<rng_gsl> vrng;
    /// Make a step to a new attempted minimum
    virtual int step(vec_t &x, vec_t &sx, int nvar, size_t ithread) {
      size_t nstep=this->step_vec.size();
      for(int i=0;i<nvar;i++) {
        double u=vrng[ithread].random();
        // Uniform step within [x[i]-step_i,x[i]+step_i], where step_i
        // is the step size associated with this coordinate
        double step_i=this->step_vec[i%nstep];
        sx[i]=(2.0*u-1.0)*step_i+x[i];
      }
      return 0;
    }
    /** \brief Minimize the functions in \c func over \c nv variables,
        one function object per OpenMP thread
    */
    virtual int mmin(size_t nv, vec_t &x0, double &fmin,
                     std::vector<func_t> &func) {

      // Basic argument checks
      if (nv==0) {
        O2SCL_ERR2("Tried to minimize over zero variables ",
                   "in anneal_para::mmin().",exc_einval);
      }
      if (func.size()<n_threads) {
        std::cout << "anneal_para::mmin(): Not enough functions for "
                  << n_threads << " threads. Setting n_threads to "
                  << func.size() << "." << std::endl;
        n_threads=func.size();
      }

      // Set the number of OpenMP threads and record the number
      // actually obtained
      omp_set_num_threads(n_threads);
#pragma omp parallel
      {
        n_threads=omp_get_num_threads();
      }

      // Record the starting time for the max_time check below
      start_time=MPI_Wtime();
      // Per-thread storage for the current, attempted, previous,
      // and best points, and their function values
      std::vector<vec_t> x(n_threads), new_x(n_threads);
      std::vector<vec_t> old_x(n_threads), best_x(n_threads);
      ubvector E(n_threads), new_E(n_threads), best_E(n_threads);
      ubvector old_E(n_threads), r(n_threads);

      // One random number generator per thread, each with a
      // distinct seed
      vrng.resize(n_threads);
      unsigned long int s=time(0);
      for(size_t it=0;it<n_threads;it++) {
        vrng[it].set_seed(s+it);
      }
      // Initialize each thread's starting point from x0 and
      // evaluate the initial function value
#pragma omp parallel default(shared)
      {
#pragma omp for
        for(size_t it=0;it<n_threads;it++) {
          x[it].resize(nv);
          new_x[it].resize(nv);
          best_x[it].resize(nv);
          old_x[it].resize(nv);
          for(size_t j=0;j<nv;j++) {
            x[it][j]=x0[j];
            best_x[it][j]=x0[j];
          }
          E[it]=func[it](nv,x[it]);
          best_E[it]=E[it];
        }
      }

      // Set the initial temperature and step sizes
      double T;
      this->start(nv,T);
      int iter=0;

      // Number of accepted moves per thread at the current
      // temperature, used by next() below
      std::vector<size_t> nmoves(n_threads);

      bool done=false;
      while (done==false) {
        // For each thread, store the previous point and attempt
        // ntrial moves at the current temperature
#pragma omp parallel default(shared)
        {
#pragma omp for
          for(size_t it=0;it<n_threads;it++) {

            for(size_t j=0;j<nv;j++) old_x[it][j]=x[it][j];
            old_E[it]=E[it];
            nmoves[it]=0;

            for(int i=0;i<this->ntrial;++i) {

              // Take a step and evaluate the function there
              step(x[it],new_x[it],nv,it);
              new_E[it]=func[it](nv,new_x[it]);

              // Stop early if the maximum time has been exceeded
              double elapsed=MPI_Wtime()-start_time;
              if (max_time>0.0 && elapsed>max_time) {
                i=this->ntrial;
              }

              // Store the best point obtained so far
              if (new_E[it]<=best_E[it]) {
                for(size_t j=0;j<nv;j++) best_x[it][j]=new_x[it][j];
                best_E[it]=new_E[it];
              }

              // Accept a downhill step unconditionally, and an uphill
              // step with the Metropolis probability
              // exp(-(new_E-E)/(boltz*T))
              if (new_E[it]<E[it]) {
                for(size_t j=0;j<nv;j++) x[it][j]=new_x[it][j];
                E[it]=new_E[it];
                nmoves[it]++;
              } else {
                r[it]=vrng[it].random();
                if (r[it] < exp(-(new_E[it]-E[it])/(this->boltz*T))) {
                  for(size_t j=0;j<nv;j++) x[it][j]=new_x[it][j];
                  E[it]=new_E[it];
                  nmoves[it]++;
                }
              }
            }
          }
        }
        // Collect the best point found across all threads
        fmin=best_E[0];
        for(size_t iv=0;iv<nv;iv++) {
          x0[iv]=best_x[0][iv];
        }
        for(size_t i=1;i<n_threads;i++) {
          if (best_E[i]<fmin) {
            fmin=best_E[i];
            for(size_t iv=0;iv<nv;iv++) {
              x0[iv]=best_x[i][iv];
            }
          }
        }

        if (this->verbose>0) {
          this->print_iter(nv,x0,fmin,iter,T,"anneal_para");
        }
        iter++;
        // Decide, thread by thread, how to proceed with the
        // next iteration
        std::vector<int> done_arr(n_threads);
#pragma omp parallel default(shared)
        {
#pragma omp for
          for(size_t it=0;it<n_threads;it++) {
            next(nv,old_x[it],old_E[it],x[it],E[it],T,nmoves[it],
                 x0,fmin,done_arr[it]);
          }
        }

        // Stop only when all threads have finished
        done=true;
        for(size_t it=0;it<n_threads;it++) {
          if (done_arr[it]==0) done=false;
        }

      }
      // If requested, exchange the best minimum between all MPI ranks
      if (collect_all_ranks) {

        int mpi_rank, mpi_size;
        MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
        MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);

        double fmin_new;
        std::vector<double> x0_new(nv);
        MPI_Status status;

        // First pass: each rank (except the last) receives the
        // running minimum from the next higher rank and keeps the
        // smaller of the two
        if (mpi_rank<mpi_size-1) {
          int tag=0;
          MPI_Recv(&fmin_new,1,MPI_DOUBLE,mpi_rank+1,tag,MPI_COMM_WORLD,
                   &status);
          tag=1;
          MPI_Recv(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank+1,tag,MPI_COMM_WORLD,
                   &status);
          if (fmin_new<fmin) {
            fmin=fmin_new;
            for(size_t ik=0;ik<nv;ik++) {
              x0[ik]=x0_new[ik];
            }
          } else {
            for(size_t ik=0;ik<nv;ik++) {
              x0_new[ik]=x0[ik];
            }
          }
        }

        // Send the current minimum down to the next lower rank
        if (mpi_size>1 && mpi_rank>0) {
          for(size_t ik=0;ik<nv;ik++) x0_new[ik]=x0[ik];
          MPI_Send(&fmin,1,MPI_DOUBLE,mpi_rank-1,0,MPI_COMM_WORLD);
          MPI_Send(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank-1,1,MPI_COMM_WORLD);
        }

        // Second pass: each rank (except rank 0) receives the global
        // minimum from the next lower rank
        if (mpi_size>1 && mpi_rank>0) {
          int tag=2;
          MPI_Recv(&fmin_new,1,MPI_DOUBLE,mpi_rank-1,tag,MPI_COMM_WORLD,
                   &status);
          tag=3;
          MPI_Recv(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank-1,tag,MPI_COMM_WORLD,
                   &status);
          if (fmin_new<fmin) {
            fmin=fmin_new;
            for(size_t ik=0;ik<nv;ik++) {
              x0[ik]=x0_new[ik];
            }
          }
        }

        // Pass the global minimum up to the next higher rank
        if (mpi_rank<mpi_size-1) {
          for(size_t ik=0;ik<nv;ik++) x0_new[ik]=x0[ik];
          MPI_Send(&fmin,1,MPI_DOUBLE,mpi_rank+1,2,MPI_COMM_WORLD);
          MPI_Send(&(x0_new[0]),nv,MPI_DOUBLE,mpi_rank+1,3,MPI_COMM_WORLD);
        }

        MPI_Barrier(MPI_COMM_WORLD);
      }

      return 0;
    }
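    // A note on the exchange above: when collect_all_ranks is true,
    // the global minimum is assembled in two point-to-point passes.
    // In the first pass each rank (except the last) receives the
    // running minimum from the next higher rank, keeps the smaller
    // value, and relays it downward, so rank 0 ends up with the
    // global minimum. In the second pass that global minimum is
    // relayed back up from rank 0, so every rank returns the same
    // x0 and fmin. The final MPI_Barrier() keeps the ranks
    // synchronized before mmin() returns.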
    /// Determine how to change the minimization for the next iteration
    virtual int next(size_t nvar, vec_t &x_old, double min_old,
                     vec_t &x_new, double min_new, double &T,
                     size_t n_moves, vec_t &best_x, double best_E,
                     int &finished) {

      // If the next temperature would fall below the tolerance,
      // the minimization is finished
      if (T/this->T_dec<this->tol_abs) {
        finished=1;
        return success;
      }
      // If no moves were accepted, decrease the step sizes
      if (n_moves==0) {
        for(size_t i=0;i<nvar;i++) {
          this->step_vec[i%this->step_vec.size()]/=this->step_dec;
        }
      }
      // Decrease the temperature
      T/=this->T_dec;
      return success;
    }
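    // A note on the annealing schedule implemented by mmin() and
    // next(): at each temperature, every thread attempts up to
    // ntrial steps, accepting downhill moves always and uphill moves
    // with probability exp(-(new_E-E)/(boltz*T)). After each batch
    // of trials the temperature is divided by T_dec, and a thread
    // which accepted no moves has its step sizes divided by
    // step_dec. The minimization stops once T/T_dec falls below
    // tol_abs, or when max_time (if positive) is exceeded.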
    /** \brief Minimize the function \c func over \c nv variables,
        using a copy of the same function object in every thread
    */
    virtual int mmin(size_t nv, vec_t &x0, double &fmin, func_t &func) {

      // Determine the number of OpenMP threads actually available
      omp_set_num_threads(n_threads);
#pragma omp parallel
      {
        n_threads=omp_get_num_threads();
      }

      // Copy the function object, one copy per thread, and call the
      // multi-function version of mmin()
      std::vector<func_t> vf(n_threads);
      for(size_t i=0;i<n_threads;i++) vf[i]=func;

      return mmin(nv,x0,fmin,vf);
    }

    /// Return string denoting type ("anneal_para")
    virtual const char *type() { return "anneal_para"; }

  };
#ifndef DOXYGEN_NO_O2NS
}
#endif

#endif

Documentation for the symbols referenced above:

std::function<double(size_t, const boost::numeric::ublas::vector<double> &)> multi_funct
Multi-dimensional function typedef.
double start_time
The MPI starting time.
o2scl
The main O2scl namespace.
ubvector step_vec
Vector of step sizes.
size_t n_threads
The number of OpenMP threads.
exc_einval
invalid argument supplied by user
virtual int step(vec_t &x, vec_t &sx, int nvar, size_t ithread)
Make a step to a new attempted minimum.
virtual int mmin(size_t nv, vec_t &x0, double &fmin, func_t &func)
Minimize the function func over nv variables, using a copy of the same function object in every thread.
int verbose
Output control.
double T_dec
Factor to decrease temperature by (default 1.5)
double boltz
Boltzmann factor (default 1.0).
bool collect_all_ranks
If true, obtain the global minimum over all MPI ranks (default true)
#define O2SCL_ERR2(d, d2, n)
Set an error, two-string version.
virtual int start(size_t nvar, double &T)
Setup initial temperature and stepsize.
anneal_para
Multidimensional minimization by simulated annealing (OpenMP/MPI version)
double min_step_ratio
Ratio between minimum step size and tol_abs (default 100.0)
double tol_abs
The independent variable tolerance.
anneal_gsl
Multidimensional minimization by simulated annealing (GSL)
virtual int next(size_t nvar, vec_t &x_old, double min_old, vec_t &x_new, double min_new, double &T, size_t n_moves, vec_t &best_x, double best_E, int &finished)
Determine how to change the minimization for the next iteration.
virtual int print_iter(size_t nv, vec_t &x, double y, int iter, double tptr, std::string comment)
Print out iteration information.
virtual const char * type()
Return string denoting type ("anneal_para")
double max_time
The maximum time.
double step_dec
Factor to decrease step size by (default 1.5)
double step_norm
Normalization for step.
virtual int mmin(size_t nv, vec_t &x0, double &fmin, std::vector< func_t > &func)
Minimize the functions in func over nv variables, one function object per OpenMP thread.
int ntrial
Maximum number of iterations.
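For reference, here is a minimal sketch of how this class might be used, assuming an MPI-enabled build compiled with OpenMP support, the header <o2scl/anneal_para.h>, and the template parameters shown above. The function quadratic, the thread count, and the starting point are illustrative only, not part of the library.

#include <mpi.h>

#include <boost/numeric/ublas/vector.hpp>

#include <o2scl/anneal_para.h>

using namespace o2scl;

typedef boost::numeric::ublas::vector<double> ubvector;

// A simple two-dimensional test function with its minimum at (2,3)
// (illustrative only)
double quadratic(size_t nv, const ubvector &v) {
  return (v[0]-2.0)*(v[0]-2.0)+(v[1]-3.0)*(v[1]-3.0);
}

int main(int argc, char *argv[]) {

  // The class uses MPI_Wtime() and, when collect_all_ranks is true,
  // point-to-point communication, so initialize MPI first
  MPI_Init(&argc,&argv);

  anneal_para<multi_funct,ubvector> ap;
  ap.n_threads=2;            // request two OpenMP threads

  multi_funct f=quadratic;

  // Starting point
  ubvector x(2);
  x[0]=10.0;
  x[1]=-10.0;

  double fmin;
  ap.mmin(2,x,fmin,f);

  // On success, x holds the best point found and fmin the
  // corresponding function value
  MPI_Finalize();

  return 0;
}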