This notebook analyzes the relationship between private credit expansion (as a percentage of GDP) and subsequent duration returns.
Hypothesis: Higher private credit creation is associated with negative subsequent returns in duration. Strong credit growth increases the probability of policy tightening—all else equal, this bodes ill for long duration positions.
Data source: We use JPMaQS (J.P. Morgan Macrosynergy Quantamental System) for panel data across developed and emerging markets. We validate this data against our own calculations using Haver.
# --- Imports and environment setup ---
# Standard library / third-party
import os
import pandas as pd
# Macrosynergy quantamental toolkit: data management, panel operations,
# signal construction, PnL computation, and plotting helpers
import macrosynergy.management as msm
import macrosynergy.panel as msp
import macrosynergy.signal as mss
import macrosynergy.pnl as msn
import macrosynergy.visuals as msv
# Load JPMaQS credentials (JP_CLIENT_ID / JP_CLIENT_SECRET) from a .env file
from dotenv import load_dotenv
load_dotenv()
# Internal plotting helpers (tulip) and the JPMaQS downloader
from tulip.plots import plot_line, plot_lines
from macrosynergy.download import JPMaQSDownload
from timeit import default_timer as timer
from datetime import timedelta, date
import warnings
from IPython.display import HTML
# Silence library warnings for cleaner notebook output
warnings.simplefilter("ignore")Setup: Data Download¶
We download the full panel of private credit indicators from JPMaQS for all countries. This includes the country-level series (US, Germany, Canada, and UK) that we'll use for validation against Haver.
# Cross-sections of interest: the currency areas analyzed for private
# credit expansion, grouped by region.
cids_dmca = ["AUD", "CAD", "CHF", "EUR", "GBP", "JPY", "NOK", "NZD", "SEK", "USD"]  # DM currency areas
cids_dmec = ["DEM", "ESP", "FRF", "ITL", "NLG"]  # DM euro area countries
cids_latm = ["BRL", "COP", "CLP", "MXN", "PEN"]  # Latam countries
cids_emea = ["CZK", "HUF", "ILS", "PLN", "RON", "RUB", "TRY", "ZAR"]  # EMEA countries
cids_emas = ["CNY", "HKD", "IDR", "INR", "KRW", "MYR", "PHP", "SGD", "THB", "TWD"]  # EM Asia countries
# Developed / emerging aggregates and the full alphabetized universe
cids_dm = cids_dmca + cids_dmec
cids_em = cids_latm + cids_emea + cids_emas
cids = sorted(cids_dm + cids_em)
# Quantamental categories of interest
# Main private credit indicators: two core growth measures plus their
# 1m/3m/6m changes (the "_D*" transformations).
_credit_cores = [
    "PCREDITBN_SJA_P1M1ML12",  # Private bank credit, % oya
    "PCREDITGDP_SJA_D1M1ML12",  # Private credit expansion as % of GDP
]
_change_windows = ["D1M1ML12", "D3M3ML3", "D6M6ML6"]
main = _credit_cores + [
    f"{core}_{window}" for core in _credit_cores for window in _change_windows
]
# Economic context indicators (needed for excess credit calculation)
econ = ["RIR_NSA", "RGDP_SA_P1Q1QL4_20QMA", "INFTEFF_NSA"]
# Market return indicators
mark = [
    "DU05YXR_VT10",  # 5-year IRS fixed receiver returns, vol-targeted
    "FXXR_NSA",  # FX forward returns
    "FXTARGETED_NSA",  # For blacklist
    "FXUNTRADABLE_NSA",  # For blacklist
]
xcats = main + econ + mark
# Download series from J.P. Morgan DataQuery
start_date = "1990-01-01"
# Full ticker universe: every cross-section crossed with every category
tickers = [f"{cid}_{xcat}" for cid in cids for xcat in xcats]
# Credentials come from the environment (.env loaded at the top of the notebook)
client_id: str = os.getenv("JP_CLIENT_ID")
client_secret: str = os.getenv("JP_CLIENT_SECRET")
# API reference:
# https://docs.macrosynergy.com/stable/_modules/macrosynergy/download/jpmaqs.html#JPMaQSDownload
t0 = timer()
with JPMaQSDownload(
    client_id=client_id, client_secret=client_secret, print_debug_data=False
) as dq_client:
    # Pull value plus release-lag and grading metadata for every ticker
    df = dq_client.download(
        tickers=tickers,
        start_date=start_date,
        metrics=["value", "eop_lag", "mop_lag", "grading"],
        suppress_warning=True,
        show_progress=False,
    )
download_time = timedelta(seconds=timer() - t0)  # wall-clock time, for ad-hoc logging
# Clear the cell's accumulated download output
from IPython.display import clear_output
clear_output()
# Create FX blacklist (periods when FX is targeted or untradable)
dfx = df.copy()
# Keep only the two categories that flag untradable FX regimes
trigger_cats = ["FXTARGETED_NSA", "FXUNTRADABLE_NSA"]
dfb = dfx.loc[dfx["xcat"].isin(trigger_cats), ["cid", "xcat", "real_date", "value"]]
# A (cid, date) pair is blacklisted when either trigger is active,
# i.e. the max of the two flag values
dfba = dfb.groupby(["cid", "real_date"], as_index=False)["value"].max()
dfba["xcat"] = "FXBLACK"
fxblack = msp.make_blacklist(dfba, "FXBLACK")
# Expected cross-sections: drop currencies without usable data
cids_exp = sorted(set(cids) - {"ARS", "HKD"})
# Calculate excess private credit growth (versus long-term nominal GDP benchmark)
# PCBASIS = expected nominal credit growth based on inflation target + long-term real GDP growth
dfa = msp.panel_calculator(
dfx,
["PCBASIS = INFTEFF_NSA + RGDP_SA_P1Q1QL4_20QMA"],
cids=cids_exp,
)
dfx = msm.update_df(dfx, dfa)
# Calculate excess private credit growth (actual vs benchmark)
pcgs = ["PCREDITBN_SJA_P1M1ML12"]
for pcg in pcgs:
calc_pcx = f"{pcg}vLTB = {pcg} - PCBASIS"
dfa = msp.panel_calculator(dfx, calcs=[calc_pcx], cids=cids_exp)
dfx = msm.update_df(dfx, dfa)
# Compute z-scores for key credit metrics (normalized by country-specific means and std devs)
xcatx = [
"PCREDITGDP_SJA_D1M1ML12",
"PCREDITBN_SJA_P1M1ML12vLTB",
"PCREDITBN_SJA_P1M1ML12",
]
for xcat in xcatx:
dfa = msp.make_zn_scores(
dfx,
cids=list(cids_exp),
xcat=xcat,
start="2000-01-01",
est_freq="m",
neutral="mean",
pan_weight=0, # country-specific normalization (no panel weight)
)
dfx = msm.update_df(dfx, dfa)Data Validation: JPMaQS vs Haver¶
We validate the JPMaQS PCREDITGDP_SJA_D1M1ML12 series against our own calculation using Haver data:
United States Cross-check¶
# United States: pull the USD series from the panel we already downloaded
is_usd_credit = (df["cid"] == "USD") & (df["xcat"] == "PCREDITGDP_SJA_D1M1ML12")
usd_jpmaqs = (
    df.loc[is_usd_credit, :]
    .set_index("real_date")["value"]
    .resample("ME")
    .last()
    .rename("via JPMaQS")
)
# Recompute the same quantity from raw Haver series
from tulip.data.haver import HaverApiClient
with HaverApiClient() as haver_client:
    us_credit_haver = haver_client.get_series("FABW@USECON").ts  # Private credit stock
    us_ngdp_haver = haver_client.get_series("NGDP@USECON").ts  # Nominal GDP
# Trailing 4-quarter nominal GDP, aligned to the credit index
us_ngdp_annual = us_ngdp_haver.rolling(4).sum()
us_ngdp_annual = us_ngdp_annual.reindex(us_credit_haver.index, method="bfill").ffill()
# 12-month change in the credit stock as a % of trailing annual GDP
us_credit_gdp_haver = (us_credit_haver.diff(12) / us_ngdp_annual * 100).rename(
    "via Haver"
)
plot_line(
    blue=usd_jpmaqs,
    red=us_credit_gdp_haver,
    title="<b>US Private Credit Expansion as % of GDP</b>: JPMaQS vs Haver",
    tick_suffix="%",
    years_limit=15,
)
Germany Cross-check¶
# Extract German (DEM) credit data from the panel we already downloaded
dem_jpmaqs = (
    df.loc[(df["cid"] == "DEM") & (df["xcat"] == "PCREDITGDP_SJA_D1M1ML12"), :]
    .set_index("real_date")["value"]
    .resample("ME")
    .last()
    .rename("via JPMaQS")
)
# Pull German credit data from Haver and calculate ourselves
from tulip.data.haver import HaverApiClient
with HaverApiClient() as haver_client:
    dem_credit_haver = (
        haver_client.get_series("DENFCNB@GERMANY").ts / 1000
    )  # Private credit stock (÷1000 — presumably aligns units with NGDP; confirm)
    dem_ngdp_haver = haver_client.get_series("DENNGDP@GERMANY").ts  # Nominal GDP
# Calculate: 12-month change in credit as % of trailing 4-quarter GDP
dem_ngdp_annual = (
    dem_ngdp_haver.rolling(4)
    .sum()
    .reindex(dem_credit_haver.index, method="bfill")
    .ffill()
)
dem_credit_gdp_haver = (dem_credit_haver.diff(12) / dem_ngdp_annual * 100).rename(
    "via Haver"
)
plot_line(
    blue=dem_jpmaqs,
    red=dem_credit_gdp_haver,
    title="<b>German Private Credit Expansion as % of GDP</b>: JPMaQS vs Haver",
    tick_suffix="%",
    years_limit=15,
)Canada Cross-check¶
# Extract USD credit data from the panel we already downloaded
cad_jpmaqs = (
df.loc[(df["cid"] == "CAD") & (df["xcat"] == "PCREDITGDP_SJA_D1M1ML12"), :]
.set_index("real_date")["value"]
.resample("ME")
.last()
.rename("via JPMaQS")
)
# Pull USD credit data from Haver and calculate ourselves
from tulip.data.haver import HaverApiClient
with HaverApiClient() as haver_client:
# Millions of CAD
cad_credit_liabs_hh = haver_client.get_series(
"V1C15625@CANADA"
).ts # Private credit stock
cad_credit_liabs_nfc = haver_client.get_series(
"V1C15703@CANADA"
).ts # Private credit stock
cad_ngdp_haver = haver_client.get_series("V6E05783@CANADA").ts # Millions SAAR
cad_ngdp_haver = cad_ngdp_haver.rolling(4).mean().resample("ME").ffill().reindex()
cad_credit_creation = pd.concat(
[
cad_credit_liabs_hh.squeeze().rename("HH"),
cad_credit_liabs_nfc.squeeze().rename("NFC"),
],
axis=1,
)
cad_credit_creation_total = (
(
cad_credit_creation.sum(axis=1).diff(12)
/ cad_ngdp_haver.reindex(cad_credit_creation.index).ffill()
)
.squeeze()
.rename("via Haver")
)
cad_credit_creation = cad_credit_creation.diff(12).div(
cad_ngdp_haver.reindex(cad_credit_creation.index).ffill(), axis=0
)
cad_credit_creation["total"] = cad_credit_creation.sum(axis=1)
assert cad_credit_creation["total"].tail(1)[0] == cad_credit_creation_total.tail(1)[0]plot_lines(
[cad_jpmaqs / 100, cad_credit_creation_total],
title="<b>Canadian Private Credit Expansion as % of GDP</b>: JPMaQS vs Haver",
tick_format="0.0%",
years_limit=15,
)plot_lines(
cad_credit_creation.iloc[:, :2],
title="<b>Canadian Private Credit Expansion as % of GDP</b> Breakdown by Sector",
tick_format="0.0%",
years_limit=15,
)United Kingdom Private Credit Creation Cross-check¶
# Extract USD credit data from the panel we already downloaded
uk_jpmaqs = (
df.loc[(df["cid"] == "GBP") & (df["xcat"] == "PCREDITGDP_SJA_D1M1ML12"), :]
.set_index("real_date")["value"]
.resample("ME")
.last()
.rename("via JPMaQS")
)
with HaverApiClient() as haver_client:
# Millions of GBP
uk_credit_liabs_privates = haver_client.get_series(
"UNB2UQ@UK"
).ts # Private credit stock
uk_ngdp_haver = (
haver_client.get_series("YBHAQ@UK").ts.rolling(4).sum().resample("ME").ffill()
)
uk_credit_creation_total = (
(
uk_credit_liabs_privates.diff(12)
/ uk_ngdp_haver.reindex(uk_credit_liabs_privates.index).ffill()
)
.squeeze()
.rename("via Haver")
)
plot_lines(
[uk_jpmaqs / 100, uk_credit_creation_total],
title="<b>UK Private Credit Expansion as % of GDP</b>: JPMaQS vs Haver",
tick_format="0.0%",
years_limit=15,
show_0=True,
)Panel Analysis: Credit Expansion & Duration Returns¶
We analyze the relationship between private credit expansion and subsequent 5-year IRS duration returns across developed and emerging markets. The hypothesis is that higher credit growth leads to policy tightening, which is negative for duration.
# Panel regression: Credit expansion vs subsequent duration returns
xcatx_map = {
"ZN": "PCREDITGDP_SJA_D1M1ML12ZN",
"excess": "PCREDITBN_SJA_P1M1ML12vLTB",
"excessZN": "PCREDITBN_SJA_P1M1ML12vLTBZN",
}
cidx = {
"10 developed markets": list(set(cids_dmca)),
"19 emerging markets": list(set(cids_em) - set(["HKD", "PEN", "PHP", "RON"])),
} # no data for these currencies
xlab_dict = {
"PCREDITGDP_SJA_D1M1ML12ZN": "Private credit growth as % of GDP, z-scored, ",
"PCREDITBN_SJA_P1M1ML12vLTB": "Excess private credit growth (vs nominal GDP trend), %oya, ",
"PCREDITBN_SJA_P1M1ML12vLTBZN": "Excess private credit growth, %oya, z-score, ",
}
cr = {}
for identifier, sig in xcatx_map.items():
for cid_name, cid_list in cidx.items():
key = f"cr_{identifier}{cid_name}"
cr[key] = msp.CategoryRelations(
dfx,
xcats=[sig, "DU05YXR_VT10"],
cids=cid_list,
freq="Q",
lag=1,
xcat_aggs=["last", "sum"],
blacklist=fxblack,
start="2000-01-01",
xcat_trims=[None, None],
)
all_cr_instances = [
cr[f"cr_{identifier}{cid_name}"]
for identifier in xcatx_map.keys()
for cid_name in cidx.keys()
]
subplot_titles = [
f"{xlab_dict[xcatx_map[identifier]]} {cid_name}"
for identifier in xcatx_map.keys()
for cid_name in cidx.keys()
]
msv.multiple_reg_scatter(
all_cr_instances,
title="Private credit growth and subsequent duration returns, since 2000",
title_fontsize=26,
xlab="Private bank credit, jump-adjusted, % over a year ago",
ylab="5-year IRS fixed receiver returns, vol-targeted, next quarter",
ncol=2,
nrow=3,
figsize=(24, 18),
prob_est="map",
subplot_titles=subplot_titles,
coef_box="upper right",
coef_box_size=(0.8, 4),
coef_box_font_size=18,
subplot_title_fontsize=20,
label_fontsize=18,
)Current Status¶
Latest private credit expansion readings for G10 + major emerging markets. Higher values indicate faster credit growth relative to GDP, which historically predicts weaker duration returns.
# G10 + major EM countries shown in the status table
cids_status = [
    "USD", "EUR", "GBP", "JPY", "AUD", "CAD", "CHF", "NOK", "NZD", "SEK",  # G10
    "BRL", "CNY", "INR", "MXN", "KRW",  # Major EM
]
# Four metrics to display: category name -> table column label
metrics = {
    "PCREDITGDP_SJA_D1M1ML12": "Credit/GDP (%)",
    "PCREDITGDP_SJA_D1M1ML12ZN": "Credit/GDP (Z)",
    "PCREDITBN_SJA_P1M1ML12vLTB": "Excess Credit (%)",
    "PCREDITBN_SJA_P1M1ML12vLTBZN": "Excess Credit (Z)",
}
# Latest reading per country for each metric
latest_data = {}
for xcat, label in metrics.items():
    per_country = (
        dfx[dfx["xcat"] == xcat]
        .sort_values("real_date")
        .groupby("cid")
        .last()
        .loc[lambda g: g.index.isin(cids_status), "value"]
        .rename(label)
    )
    latest_data[label] = per_country
# One row per country, ranked by the excess-credit z-score (fastest growth first)
latest_credit = pd.DataFrame(latest_data).dropna()
latest_credit = latest_credit.sort_values("Excess Credit (Z)", ascending=False)
# Reference date for the table header
as_of_date = dfx[dfx["xcat"] == "PCREDITGDP_SJA_D1M1ML12ZN"]["real_date"].max()
print(f"Latest Private Credit Metrics (as of {as_of_date.strftime('%Y-%m-%d')})")
# Shade the z-score columns; reversed colormap so high credit growth reads red
latest_credit.style.format("{:.2f}").background_gradient(
    subset=["Credit/GDP (Z)", "Excess Credit (Z)"], cmap="RdYlGn_r"
)
msp.view_timelines(
    df,
    xcats=["PCREDITBN_SJA_P1M1ML12"],
    cids=cids_dm,
    ncol=4,
    start="1995-01-01",
    title="Private credit growth, %oya",
    same_y=False,
    cs_mean=True,
    aspect=2,
    height=3,  # de-facto determines plot size relative to texts
    title_adj=1.02,  # below 1 reduces facet height to make space for the title
    label_adj=0.08,
    xcat_labels=["Credit growth", "Global average"],
)