Address review

This commit is contained in:
Ye Luo 2022-08-25 13:14:43 -05:00
parent 59dce50161
commit a462563d44
4 changed files with 47 additions and 44 deletions

View File

@ -84,6 +84,7 @@ public:
void prepareSampling(int num_params, int num_samples) override
{
//FIXME it should respect num_samples and avoid relying on threads.
engine_.prepareStorage(omp_get_max_threads(), num_params);
der_rat_samp.resize(num_params + 1, 0.0);
@ -108,6 +109,7 @@ public:
le_der_samp[j + 1] = static_cast<FullPrecValue>(dhpsioverpsi_array.getValue(j, local_index)) +
le_der_samp[0] * static_cast<FullPrecValue>(dlogpsi_array.getValue(j, local_index));
}
//FIXME it should respect base_sample_index and avoid relying on threads.
int ip = omp_get_thread_num();
engine_.takeSample(ip, der_rat_samp, le_der_samp, le_der_samp, 1.0, 1.0);
}

View File

@ -122,10 +122,10 @@ QMCFixedSampleLinearOptimizeBatched::QMCFixedSampleLinearOptimizeBatched(
m_param.add(LMY_options_.num_shifts, "LMY_options_.num_shifts");
m_param.add(LMY_options_.cost_increase_tol, "LMY_options_.cost_increase_tol");
m_param.add(LMY_options_.target_shift_i, "LMY_options_.target_shift_i");
m_param.add(LMY_options_.filter_param_, "filter_param");
m_param.add(LMY_options_.ratio_threshold_, "deriv_threshold");
m_param.add(LMY_options_.store_samples_, "store_samples");
m_param.add(LMY_options_.filter_info_, "filter_info");
m_param.add(LMY_options_.filter_param, "filter_param");
m_param.add(LMY_options_.ratio_threshold, "deriv_threshold");
m_param.add(LMY_options_.store_samples, "store_samples");
m_param.add(LMY_options_.filter_info, "filter_info");
#ifdef HAVE_LMY_ENGINE
@ -316,15 +316,15 @@ bool QMCFixedSampleLinearOptimizeBatched::run()
}
// if requested, perform the update via the adaptive three-shift or single-shift method
if (LMY_options_.current_optimizer_type_ == OptimizerType::ADAPTIVE)
if (LMY_options_.current_optimizer_type == OptimizerType::ADAPTIVE)
return adaptive_three_shift_run();
if (LMY_options_.current_optimizer_type_ == OptimizerType::DESCENT)
if (LMY_options_.current_optimizer_type == OptimizerType::DESCENT)
return descent_run();
#endif
if (LMY_options_.current_optimizer_type_ == OptimizerType::ONESHIFTONLY)
if (LMY_options_.current_optimizer_type == OptimizerType::ONESHIFTONLY)
return one_shift_run();
return previous_linear_methods_run();
@ -657,19 +657,19 @@ bool QMCFixedSampleLinearOptimizeBatched::processOptXML(xmlNodePtr opt_xml,
auto iter = OptimizerNames.find(MinMethod);
if (iter == OptimizerNames.end())
throw std::runtime_error("Unknown MinMethod!\n");
LMY_options_.previous_optimizer_type_ = LMY_options_.current_optimizer_type_;
LMY_options_.current_optimizer_type_ = OptimizerNames.at(MinMethod);
LMY_options_.previous_optimizer_type = LMY_options_.current_optimizer_type;
LMY_options_.current_optimizer_type = OptimizerNames.at(MinMethod);
if (LMY_options_.current_optimizer_type_ == OptimizerType::DESCENT && !descentEngineObj)
if (LMY_options_.current_optimizer_type == OptimizerType::DESCENT && !descentEngineObj)
descentEngineObj = std::make_unique<DescentEngine>(myComm, opt_xml);
// sanity check
if (LMY_options_.targetExcited && LMY_options_.current_optimizer_type_ != OptimizerType::ADAPTIVE &&
LMY_options_.current_optimizer_type_ != OptimizerType::DESCENT)
if (LMY_options_.targetExcited && LMY_options_.current_optimizer_type != OptimizerType::ADAPTIVE &&
LMY_options_.current_optimizer_type != OptimizerType::DESCENT)
APP_ABORT("LMY_options_.targetExcited = \"yes\" requires that MinMethod = \"adaptive or descent");
#ifdef _OPENMP
if (LMY_options_.current_optimizer_type_ == OptimizerType::ADAPTIVE && (omp_get_max_threads() > 1))
if (LMY_options_.current_optimizer_type == OptimizerType::ADAPTIVE && (omp_get_max_threads() > 1))
{
// throw std::runtime_error("OpenMP threading not enabled with AdaptiveThreeShift optimizer. Use MPI for parallelism instead, and set OMP_NUM_THREADS to 1.");
app_log() << "test version of OpenMP threading with AdaptiveThreeShift optimizer" << std::endl;
@ -698,9 +698,9 @@ bool QMCFixedSampleLinearOptimizeBatched::processOptXML(xmlNodePtr opt_xml,
"LMY_options_.cost_increase_tol must be non-negative in QMCFixedSampleLinearOptimizeBatched::put");
// if this is the first time this function has been called, set the initial shifts
if (bestShift_i < 0.0 && (LMY_options_.current_optimizer_type_ == OptimizerType::ADAPTIVE || LMY_options_.doHybrid))
if (bestShift_i < 0.0 && (LMY_options_.current_optimizer_type == OptimizerType::ADAPTIVE || LMY_options_.doHybrid))
bestShift_i = shift_i_input;
if (LMY_options_.current_optimizer_type_ == OptimizerType::ONESHIFTONLY)
if (LMY_options_.current_optimizer_type == OptimizerType::ONESHIFTONLY)
bestShift_i = shift_i_input;
if (bestShift_s < 0.0)
bestShift_s = shift_s_input;
@ -1070,17 +1070,17 @@ void QMCFixedSampleLinearOptimizeBatched::solveShiftsWithoutLMYEngine(
#ifdef HAVE_LMY_ENGINE
bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
{
EngineObj->setStoringSamples(LMY_options_.store_samples_);
EngineObj->setStoringSamples(LMY_options_.store_samples);
//Set whether LM will only update a filtered set of parameters
EngineObj->setFiltering(LMY_options_.filter_param_);
EngineObj->setFilterInfo(LMY_options_.filter_info_);
EngineObj->setFiltering(LMY_options_.filter_param);
EngineObj->setFilterInfo(LMY_options_.filter_info);
if (LMY_options_.filter_param_ && !LMY_options_.store_samples_)
if (LMY_options_.filter_param && !LMY_options_.store_samples)
myComm->barrier_and_abort(" Error: Parameter Filtration requires storing the samples. \n");
if (LMY_options_.filter_param_)
EngineObj->setThreshold(LMY_options_.ratio_threshold_);
if (LMY_options_.filter_param)
EngineObj->setThreshold(LMY_options_.ratio_threshold);
// remember what the cost function grads flag was
const bool saved_grads_flag = optTarget->getneedGrads();
@ -1131,7 +1131,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
}
//Reset parameter number for vdeps to the total number in case filtration happened on a previous iteration
if (LMY_options_.filter_param_)
if (LMY_options_.filter_param)
{
formic::VarDeps tmp_vdeps(numParams, std::vector<double>());
vdeps = tmp_vdeps;
@ -1145,7 +1145,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
EngineObj->turn_on_update();
//The initial initialization of the LM engine is handled differently if parameters are being filtered
if (!LMY_options_.filter_param_)
if (!LMY_options_.filter_param)
{
// initialize the engine if we do not use block lm or it's the first part of block lm
EngineObj->initialize(LMY_options_.nblocks, 0, LMY_options_.nkept, previous_update, false);
@ -1167,13 +1167,13 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
int new_num = 0;
//To handle different cases for the LM's mode of operation, first check if samples are being stored
if (LMY_options_.store_samples_)
if (LMY_options_.store_samples)
{
//Need to clear lists from previous iter
EngineObj->reset();
//If samples are being stored, check for the subcase where parameters are also being filtered
if (LMY_options_.filter_param_)
if (LMY_options_.filter_param)
{
EngineObj->selectParameters();
@ -1270,7 +1270,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
// get dimension of the linear method matrices
int N = numParams + 1;
if (LMY_options_.filter_param_)
if (LMY_options_.filter_param)
N = new_num + 1;
// have the cost function prepare derivative vectors
@ -1293,7 +1293,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
if (LMY_options_.block_lm)
{
if (!LMY_options_.store_samples_)
if (!LMY_options_.store_samples)
{
optTarget->setneedGrads(true);
@ -1315,7 +1315,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
finish();
if (LMY_options_.filter_param_)
if (LMY_options_.filter_param)
{
engine_start(EngineObj, *descentEngineObj, MinMethod);
EngineObj->buildMatricesFromDerivatives();
@ -1330,7 +1330,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
}
//Need to wipe the stored samples after they are no longer needed and before the next iteration
if (LMY_options_.store_samples_)
if (LMY_options_.store_samples)
{
EngineObj->clear_histories();
}
@ -1367,7 +1367,7 @@ bool QMCFixedSampleLinearOptimizeBatched::adaptive_three_shift_run()
//If parameters are being filtered, need to expand the LM updates from the engine to the full parameter set.
//There will be updates of 0 for parameters that were filtered out before derivative ratios were used by the engine.
if (LMY_options_.filter_param_)
if (LMY_options_.filter_param)
{
std::vector<std::vector<ValueType>> tmpParameterDirections;
tmpParameterDirections.resize(shifts_i.size());
@ -1808,11 +1808,11 @@ bool QMCFixedSampleLinearOptimizeBatched::hybrid_run()
//Ensure LM engine knows it is being used as part of a hybrid run
EngineObj->setOnHybrid(true);
if (LMY_options_.current_optimizer_type_ == OptimizerType::ADAPTIVE)
if (LMY_options_.current_optimizer_type == OptimizerType::ADAPTIVE)
{
//If the optimization just switched to using the BLM, need to transfer a set
//of vectors to the BLM engine.
if (LMY_options_.previous_optimizer_type_ == OptimizerType::DESCENT)
if (LMY_options_.previous_optimizer_type == OptimizerType::DESCENT)
{
descentEngineObj->resetStorageCount();
std::vector<std::vector<ValueType>> hybridBLM_Input = descentEngineObj->retrieveHybridBLM_Input();
@ -1824,7 +1824,7 @@ bool QMCFixedSampleLinearOptimizeBatched::hybrid_run()
adaptive_three_shift_run();
}
if (LMY_options_.current_optimizer_type_ == OptimizerType::DESCENT)
if (LMY_options_.current_optimizer_type == OptimizerType::DESCENT)
descent_run();
app_log() << "Finished a hybrid step" << std::endl;

View File

@ -236,20 +236,23 @@ private:
///whether to do the third part of block lm
bool block_third = false;
///whether to filter parameters for the lm
bool filter_param_ = false;
bool filter_param = false;
///whether to filter parameters for the lm
bool filter_info_ = false;
bool filter_info = false;
///threshold for filtering parameters for the lm
double ratio_threshold_ = 0.0;
double ratio_threshold = 0.0;
///whether to store samples for the lm
bool store_samples_ = false;
bool store_samples = false;
///type of the previous optimization method, updated by processOptXML before run
OptimizerType previous_optimizer_type_ = OptimizerType::NONE;
OptimizerType previous_optimizer_type = OptimizerType::NONE;
///type of the current optimization method, updated by processOptXML before run
OptimizerType current_optimizer_type_ = OptimizerType::NONE;
OptimizerType current_optimizer_type = OptimizerType::NONE;
///whether to use hybrid method
bool doHybrid = false;
} LMY_options_;
};
/// LMY engine related options
LMYOptions LMY_options_;
// ------------------------------------

View File

@ -1546,8 +1546,6 @@ void cqmc::engine::LMYEngine<S>::get_brlm_update_alg_part_two(const formic::VarD
template<typename S>
void cqmc::engine::LMYEngine<S>::store_sample(std::vector<S> & der_rat_samp,std::vector<S> & le_der_samp,std::vector<S> & ls_der_samp,double vgs_samp,double weight_samp,int sample_index)
{
int myThread = omp_get_thread_num();
for(int i = 0; i < le_der_samp.size();i++)
{
le_der_rat_history.at(sample_index,i) = le_der_samp[i];