Merge pull request '对xtuner的注释进行了一些补充' (#6) from xinran/openGauss-server:master into master

This commit is contained in:
xiangxinyong 2022-08-06 19:03:41 +08:00
commit 751e1c4267
1 changed files with 23 additions and 0 deletions

View File

@@ -173,11 +173,16 @@ def procedure_main(mode, db_info, config):
def rl_model(mode, env, config):
# Lazy loading. Because loading Tensorflow takes a long time.
from tuner.algorithms.rl_agent import RLAgent
# Instantiate the reinforcement learning agent.
rl = RLAgent(env, alg=config['rl_algorithm'])
# The two modes of training and tuning correspond to different execution processes.
# The model must be trained before it can be used for tuning. Both the training and the tuning process output the list of parameters to be tuned; because the two modes share one model, that list must be identical in both modes, otherwise exceptions about mismatched output dimensions will be thrown.
if mode == 'train':
logging.warning('The list of tuned knobs in the training mode '
'based on the reinforcement learning algorithm must be the same as '
'that in the tuning mode. ')
# The key parameter is the maximum number of iteration rounds, rl_steps; theoretically, the more steps, the more accurate the result, but training also becomes more time-consuming.
# max_episode_steps is the maximum number of steps per episode of the reinforcement learning algorithm. In the X-Tuner implementation this parameter is weakened and is generally left at its default value.
rl.fit(config['rl_steps'], nb_max_episode_steps=config['max_episode_steps'])
rl.save(config['rl_model_path'])
logging.info('Saved reinforcement learning model at %s.', config['rl_model_path'])
@@ -200,6 +205,7 @@ def rl_model(mode, env, config):
def global_search(env, config):
method = config['gop_algorithm']
# Determine which algorithm to use.
if method == 'bayes':
from bayes_opt import BayesianOptimization
@@ -207,6 +213,13 @@ def global_search(env, config):
pbound = {name: (0, 1) for name in env.db.ordered_knob_list}
def performance_function(**params):
"""
function name: performance_function
description: Define a black box function to adapt to the interface of the third-party library.
author: Li Xinran
date: 2022/8/4
contact: 19154068808
"""
if not len(params) == env.nb_actions:
raise AssertionError('Failed to check the input feature dimension.')
@@ -222,12 +235,21 @@ def global_search(env, config):
pbounds=pbound
)
optimizer.maximize(
# The larger the maximum number of iterations, the more accurate the result, but the more time-consuming the search.
n_iter=config['max_iterations']
)
elif method == 'pso':
from tuner.algorithms.pso import Pso
def performance_function(v):
    """
    function name: performance_function
    description: Black-box objective function adapting env.step to the PSO interface.
    note: The PSO implementation searches for a global minimum, so the reward is
          negated here; minimizing -reward is equivalent to maximizing the reward.
    author: Li Xinran
    date: 2022/8/4
    contact: 19154068808
    """
    s, r, d, _ = env.step(v)
    return -r  # Use -reward because PSO wishes to minimize.
@@ -237,6 +259,7 @@ def global_search(env, config):
particle_nums=config['particle_nums'],
# max_iterations on the PSO indicates the maximum number of iterations per particle,
# so it must be divided by the number of particles to be consistent with Bayes.
# The larger the maximum number of iterations, the more accurate the result, but the more time-consuming the search.
max_iteration=config['max_iterations'] // config['particle_nums'],
x_min=0, x_max=1, max_vel=0.5
)