Re-organize Q-results
@@ -65,16 +65,16 @@ def update_market(config, market):
 def run_exp(task_config, dataset, experiment_name, recorder_name, uri):

     # model initiation
-    print('')
-    print('[{:}] - [{:}]: {:}'.format(experiment_name, recorder_name, uri))
-    print('dataset={:}'.format(dataset))
+    print("")
+    print("[{:}] - [{:}]: {:}".format(experiment_name, recorder_name, uri))
+    print("dataset={:}".format(dataset))

     model = init_instance_by_config(task_config["model"])

     # start exp
     with R.start(experiment_name=experiment_name, recorder_name=recorder_name, uri=uri):

-        log_file = R.get_recorder().root_uri / '{:}.log'.format(experiment_name)
+        log_file = R.get_recorder().root_uri / "{:}.log".format(experiment_name)
         set_log_basic_config(log_file)

         # train model
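Side note on the hunk above: a minimal sketch (not part of the commit) of how the per-recorder log path is composed, assuming R.get_recorder().root_uri behaves like a pathlib.Path; the directory and experiment name below are hypothetical.

from pathlib import Path

root_uri = Path("outputs/qlib-baselines/1/abcd1234/artifacts")  # hypothetical recorder root
experiment_name = "demo-exp"  # hypothetical experiment name
log_file = root_uri / "{:}.log".format(experiment_name)
print(log_file)  # outputs/qlib-baselines/1/abcd1234/artifacts/demo-exp.log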
@@ -109,12 +109,14 @@ def main(xargs, exp_yaml):
     qlib.init(**config.get("qlib_init"))
     dataset_config = config.get("task").get("dataset")
     dataset = init_instance_by_config(dataset_config)
-    pprint('args: {:}'.format(xargs))
+    pprint("args: {:}".format(xargs))
     pprint(dataset_config)
     pprint(dataset)

     for irun in range(xargs.times):
-        run_exp(config.get("task"), dataset, xargs.alg, "recorder-{:02d}-{:02d}".format(irun, xargs.times), xargs.save_dir)
+        run_exp(
+            config.get("task"), dataset, xargs.alg, "recorder-{:02d}-{:02d}".format(irun, xargs.times), xargs.save_dir
+        )


 if __name__ == "__main__":
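For reference, an illustrative sketch (not part of the commit) of the recorder naming scheme used in the loop above, assuming xargs.times == 5; these are the recorder names that the new organize_results.py script later groups per experiment.

times = 5  # hypothetical value of xargs.times
for irun in range(times):
    print("recorder-{:02d}-{:02d}".format(irun, times))
# prints: recorder-00-05, recorder-01-05, ..., recorder-04-05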
exps/trading/organize_results.py (new file, 135 lines)
@@ -0,0 +1,135 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.02 #
+#####################################################
+# python exps/trading/organize_results.py
+#####################################################
+import sys, argparse
+import numpy as np
+from typing import List, Text
+from collections import defaultdict, OrderedDict
+from pathlib import Path
+from pprint import pprint
+import ruamel.yaml as yaml
+
+lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve()
+if str(lib_dir) not in sys.path:
+    sys.path.insert(0, str(lib_dir))
+
+import qlib
+from qlib.config import REG_CN
+from qlib.workflow import R
+
+
+class QResult:
+
+    def __init__(self):
+        self._result = defaultdict(list)
+
+    def append(self, key, value):
+        self._result[key].append(value)
+
+    @property
+    def result(self):
+        return self._result
+
+    def update(self, metrics, filter_keys=None):
+        for key, value in metrics.items():
+            if filter_keys is not None and key in filter_keys:
+                key = filter_keys[key]
+            elif filter_keys is not None:
+                continue
+            self.append(key, value)
+
+    @staticmethod
+    def full_str(xstr, space):
+        xformat = '{:' + str(space) + 's}'
+        return xformat.format(str(xstr))
+
+    def info(self, keys: List[Text], separate: Text = '', space: int = 25, show=True):
+        avaliable_keys = []
+        for key in keys:
+            if key not in self.result:
+                print('There are invalid key [{:}].'.format(key))
+            else:
+                avaliable_keys.append(key)
+        head_str = separate.join([self.full_str(x, space) for x in avaliable_keys])
+        values = []
+        for key in avaliable_keys:
+            current_values = self._result[key]
+            mean = np.mean(current_values)
+            std = np.std(current_values)
+            values.append('{:.4f} $\pm$ {:.4f}'.format(mean, std))
+        value_str = separate.join([self.full_str(x, space) for x in values])
+        if show:
+            print(head_str)
+            print(value_str)
+        else:
+            return head_str, value_str
+
+
+def compare_results(heads, values, names, space=10):
+    for idx, x in enumerate(heads):
+        assert x == heads[0], '[{:}] {:} vs {:}'.format(idx, x, heads[0])
+    new_head = QResult.full_str('Name', space) + heads[0]
+    print(new_head)
+    for name, value in zip(names, values):
+        xline = QResult.full_str(name, space) + value
+        print(xline)
+
+
+def filter_finished(recorders):
+    returned_recorders = dict()
+    not_finished = 0
+    for key, recorder in recorders.items():
+        if recorder.status == "FINISHED":
+            returned_recorders[key] = recorder
+        else:
+            not_finished += 1
+    return returned_recorders, not_finished
+
+
+def main(xargs):
+    R.reset_default_uri(xargs.save_dir)
+    experiments = R.list_experiments()
+
+    key_map = {"IC": "IC",
+               "ICIR": "ICIR",
+               "Rank IC": "Rank_IC",
+               "Rank ICIR": "Rank_ICIR",
+               "excess_return_with_cost.annualized_return": "Annualized_Return",
+               "excess_return_with_cost.information_ratio": "Information_Ratio",
+               "excess_return_with_cost.max_drawdown": "Max_Drawdown"}
+    all_keys = list(key_map.values())
+
+    print("There are {:} experiments.".format(len(experiments)))
+    head_strs, value_strs, names = [], [], []
+    for idx, (key, experiment) in enumerate(experiments.items()):
+        if experiment.id == '0':
+            continue
+        recorders = experiment.list_recorders()
+        recorders, not_finished = filter_finished(recorders)
+        print(
+            "====>>>> {:02d}/{:02d}-th experiment {:9s} has {:02d}/{:02d} finished recorders.".format(
+                idx, len(experiments), experiment.name, len(recorders), len(recorders) + not_finished
+            )
+        )
+        result = QResult()
+        for recorder_id, recorder in recorders.items():
+            result.update(recorder.list_metrics(), key_map)
+        head_str, value_str = result.info(all_keys, show=False)
+        head_strs.append(head_str)
+        value_strs.append(value_str)
+        names.append(experiment.name)
+    compare_results(head_strs, value_strs, names, space=10)
+
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser("Show Results")
+    parser.add_argument("--save_dir", type=str, default="./outputs/qlib-baselines", help="The checkpoint directory.")
+    args = parser.parse_args()
+
+    provider_uri = "~/.qlib/qlib_data/cn_data"
+    qlib.init(provider_uri=provider_uri, region=REG_CN)
+
+    main(args)
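To make the new helper easier to review: a minimal sketch (not part of the commit) of how QResult and compare_results fit together, using synthetic metric dictionaries in place of recorder.list_metrics(); the metric values, the extra "some_other_metric" key, and the "demo-exp" name are all made up, and the definitions from organize_results.py are assumed to be in scope.

result = QResult()
key_map = {"IC": "IC", "ICIR": "ICIR"}
# Two fake runs; keys not listed in key_map (e.g. "some_other_metric") are skipped by update().
result.update({"IC": 0.041, "ICIR": 0.30, "some_other_metric": 1.0}, key_map)
result.update({"IC": 0.043, "ICIR": 0.33, "some_other_metric": 1.0}, key_map)
head_str, value_str = result.info(["IC", "ICIR"], show=False)
compare_results([head_str], [value_str], ["demo-exp"], space=10)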
@@ -4,7 +4,7 @@
 # Refer to:
 # - https://github.com/microsoft/qlib/blob/main/examples/workflow_by_code.ipynb
 # - https://github.com/microsoft/qlib/blob/main/examples/workflow_by_code.py
-# python exps/trading/workflow_tt.py
+# python exps/trading/workflow_tt.py --market all
 #####################################################
 import sys, argparse
 from pathlib import Path
@@ -102,10 +102,9 @@ def main(xargs):

     task = dict(model=model_config, dataset=dataset_config, record=record_config)

-
     # start exp to train model
-    with R.start(experiment_name="tt_model", uri=xargs.save_dir):
-        set_log_basic_config(R.get_recorder().root_uri / 'log.log')
+    with R.start(experiment_name="tt_model", uri=xargs.save_dir + "-" + xargs.market):
+        set_log_basic_config(R.get_recorder().root_uri / "log.log")

     model = init_instance_by_config(model_config)
     dataset = init_instance_by_config(dataset_config)
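A one-line illustration (not part of the commit) of what the changed uri argument evaluates to; the save_dir value is hypothetical, while the csi300 default comes from the --market argument in the hunk below.

save_dir = "./outputs/tt-workflow"  # hypothetical --save_dir value
market = "csi300"                   # default --market from the parser below
print(save_dir + "-" + market)      # ./outputs/tt-workflow-csi300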
@@ -138,7 +137,7 @@ if __name__ == "__main__":
     parser.add_argument("--market", type=str, default="csi300", help="The market indicator.")
     args = parser.parse_args()

-    provider_uri = "~/.qlib/qlib_data/cn_data" # target_dir
+    provider_uri = "~/.qlib/qlib_data/cn_data"
     qlib.init(provider_uri=provider_uri, region=REG_CN)

     main(args)