MAD Portfolio optimization

I want to solve three AMPL models. The first two compute the minimum and maximum return and risk, and I want to feed their outputs (minimum return, minimum risk, maximum return, maximum risk) into a final model. I wrote the following:
tscv = TimeSeriesSplit(max_train_size=11, test_size=1)
for train_index, test_index in tscv.split(assets):
    train_assets = assets.iloc[train_index]
    test_assets = assets.iloc[test_index]

    daily_returns = train_assets.pct_change().dropna()
    mean_return = daily_returns.mean()

    # convert the date index to strings
    daily_returns["Date"] = daily_returns.index.format()
    daily_returns.set_index("Date", inplace=True)
    #min
    ampl = AMPL()
    ampl.read("mad_portfolio.mod")

    ampl.set["ASSETS"] = list(assets.columns)
    ampl.set["TIME"] = daily_returns.index
    ampl.param["daily_returns"] = daily_returns
    ampl.param["mean_return"] = mean_return
    ampl.param["w_lb"] = 0
    ampl.param["w_ub"] = 0.2
    ampl.param["R"] = 0.001
    ampl.option["solver"] = SOLVER
    ampl.solve()

    weights_df = ampl.var["w"].to_pandas()
    R_min = mean_return.dot(weights_df)
    G_min = ampl.get_value('sum{t in TIME}(u[t] + v[t]) / card(TIME)')
    #max
    maxi = AMPL()
    maxi.read("mad_portfolio_risk_free_max.mod")

    maxi.set["ASSETS"] = list(assets.columns)
    maxi.set["TIME"] = daily_returns.index
    maxi.param["daily_returns"] = daily_returns
    maxi.param["mean_return"] = mean_return
    maxi.param["w_lb"] = 0
    maxi.param["w_ub"] = 0.2
    maxi.param["R"] = 0.001
    maxi.option["solver"] = SOLVER
    maxi.solve()

    weights_df_max = maxi.var["w"].to_pandas()
    R_max = mean_return.dot(weights_df_max)
    G_max = maxi.get_value('sum{t in TIME}(u[t] + v[t]) / card(TIME)')
    

    #final
    m = AMPL()
    m.read("mad_portfolio_risk_free_max.mod")

    m.set["ASSETS"] = list(assets.columns)
    m.set["TIME"] = daily_returns.index
    m.param["daily_returns"] = daily_returns
    m.param["mean_return"] = mean_return
    m.param["R_min"] = R_min
    m.param["R_max"] = R_max
    m.param["G_min"] = G_min
    m.param["G_max"] = G_max
    m.param["risk_aversion"] = ml(assets)
    m.param["w_lb"] = 0
    m.param["w_ub"] = 0.2
    m.param["R"] = 0.001
    m.option["solver"] = SOLVER
    m.solve()

    weights_df_final = m.var["w"].to_pandas()

But I got an error.

My m model is as follows:
param R default 0;
param rf default 0;
param w_lb default 0;
param w_ub default 1;

set ASSETS;
set TIME;

param daily_returns{TIME, ASSETS};
param mean_return{ASSETS};
param R_min{TIME};
param R_max{TIME};
param G_min{TIME};
param G_max{TIME};
param risk_aversion{TIME};

var w{ASSETS};
var u{TIME} >= 0;
var v{TIME} >= 0;

maximize MAD:(sum{j in ASSETS} mean_return{ASSETS}-R_min)/(R_min-R_max)w[j]-risk_aversion((sum{t in TIME}(u[t] + v[t]) / card(TIME))-G_min)/(G_max-G_min);

s.t. portfolio_returns {t in TIME}:
u[t] - v[t] == sum{j in ASSETS}(w[j] * (daily_returns[t, j] - mean_return[j]));

s.t. sum_of_weights: sum{j in ASSETS} w[j] <= 1;

s.t. mean_portfolio_return: sum{j in ASSETS}(w[j] * (mean_return[j] - rf)) >= R - rf;

s.t. no_short {j in ASSETS}: w[j] >= w_lb;

s.t. diversify {j in ASSETS}: w[j] <= w_ub;
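
One more mismatch to watch: in the Python loop above, mean_return.dot(weights_df) returns a one-element pandas Series, while this model declares R_min, R_max, G_min, G_max and risk_aversion as indexed parameters. If they are meant to be scalars (which the objective suggests), a minimal sketch of the conversion on the Python side (my assumption, not from the book):

    # collapse the 1-element dot product to a plain float before loading it
    R_min = float(mean_return.dot(weights_df).squeeze())
    R_max = float(mean_return.dot(weights_df_max).squeeze())
    # ampl.get_value(...) already returns a float, so G_min and G_max are fine

and the corresponding declarations become plain scalars, e.g. param R_min; rather than param R_min{TIME};.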

Then I changed my code so that the final model reads mad_portfolio_risk_adjusted.mod; the rest of the loop is unchanged:

    m = AMPL()
    m.read("mad_portfolio_risk_adjusted.mod")

    m.set["ASSETS"] = list(assets.columns)
    m.set["TIME"] = daily_returns.index
    m.param["daily_returns"] = daily_returns
    m.param["mean_return"] = mean_return
    m.param["R_min"] = R_min
    m.param["R_max"] = R_max
    m.param["G_min"] = G_min
    m.param["G_max"] = G_max
    m.param["risk_aversion"] = ml(assets)
    m.param["w_lb"] = 0
    m.param["w_ub"] = 0.2
    m.param["R"] = 0.001
    m.option["solver"] = SOLVER
    m.solve()

    weights_df_final = m.var["w"].to_pandas()

My mad_portfolio_risk_adjusted.mod model is:
param R default 0;
param rf default 0;
param w_lb default 0;
param w_ub default 1;

set ASSETS;
set TIME;

param daily_returns{TIME, ASSETS};
param mean_return{ASSETS};
param R_min{ASSETS};
param R_max{ASSETS};
param G_min{TIME};
param G_max{TIME};
param risk_aversion{TIME};

var w{ASSETS};
var u{TIME} >= 0;
var v{TIME} >= 0;

maximize MAD:risk_aversion[t in Time](sum{j in ASSETS}(mean_return[j]-R_min[j])/(R_min[j]-R_max[j]))-(1-risk_aversion[t in TIME])(((sum{t in TIME}(u[t] + v[t]) / card(TIME))-G_min[t])/(G_max[t]-G_min[t]));

s.t. portfolio_returns {t in TIME}:
u[t] - v[t] == sum{j in ASSETS}(w[j] * (daily_returns[t, j] - mean_return[j]));

s.t. sum_of_weights: sum{j in ASSETS} w[j] <= 1;

s.t. mean_portfolio_return: sum{j in ASSETS}(w[j] * (mean_return[j] - rf)) >= R - rf;

s.t. no_short {j in ASSETS}: w[j] >= w_lb;

s.t. diversify {j in ASSETS}: w[j] <= w_ub;

This time I got the following warning:
mad_portfolio_risk_adjusted.mod
line 22 offset 380
t is not defined
context: maximize MAD:risk_aversion[t >>> in <<< Time](sum{j in ASSETS}(mean_return[j]-R_min[j])/(R_min[j]-R_max[j]))-(1-risk_aversion[t in TIME])(((sum{t in TIME}(u[t] + v[t]) / card(TIME))-G_min[t])/(G_max[t]-G_min[t]));
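
The warning is expected: in AMPL, t only exists inside an indexing expression such as sum{t in TIME}, so risk_aversion[t in Time] is not a valid reference (and the set is spelled TIME, not Time). Since sum{t in TIME}(u[t] + v[t]) / card(TIME) is already a scalar, the normalization bounds do not need a TIME index at all. A minimal sketch of a syntactically valid scalarized objective, assuming R_min, R_max, G_min, G_max and risk_aversion are scalar parameters (my assumption, not necessarily the book's formulation; note that AMPL requires an explicit * for every multiplication):

param R_min; param R_max;
param G_min; param G_max;
param risk_aversion default 0.5;

maximize MAD:
    risk_aversion * ((sum{j in ASSETS} w[j] * mean_return[j]) - R_min) / (R_max - R_min)
    - (1 - risk_aversion) * ((sum{t in TIME} (u[t] + v[t]) / card(TIME)) - G_min) / (G_max - G_min);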

In the final part of the MAD chapter of the AMPL-in-Python book, a for loop iterates over two models:
for color, m in zip(["ro", "go"], [mad_portfolio(assets), mad_portfolio_cash(assets)]):
    for R in np.linspace(0, mean_return.max(), 20):
        m.param["R"] = R
        m.option["solver"] = SOLVER
        m.solve()
        mad_portfolio_weights = m.var["w"].to_pandas()
        portfolio_returns = daily_returns.dot(mad_portfolio_weights)
        portfolio_mean_return = portfolio_returns.mean()
        portfolio_mean_absolute_deviation = abs(
            portfolio_returns - portfolio_mean_return
        ).mean()
        ax.plot(portfolio_mean_absolute_deviation, portfolio_mean_return, color, ms=10)
I don't understand this. I want to use only one model; how can I do that?
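
The loop from the book builds two separate model instances (mad_portfolio(assets) and mad_portfolio_cash(assets)) only so it can draw two risk-return frontiers in different colors on the same axes; the zip pairs each model with a plotting style. If you only want one model, drop the zip and sweep R on a single instance. A minimal sketch (assuming, as in the book, that mad_portfolio(assets) returns a loaded AMPL object and ax is an existing matplotlib axes):

m = mad_portfolio(assets)  # build the model once
for R in np.linspace(0, mean_return.max(), 20):
    m.param["R"] = R  # only the required-return parameter changes between solves
    m.option["solver"] = SOLVER
    m.solve()
    weights = m.var["w"].to_pandas()
    portfolio_returns = daily_returns.dot(weights)
    portfolio_mean_return = portfolio_returns.mean()
    portfolio_mad = abs(portfolio_returns - portfolio_mean_return).mean()
    ax.plot(portfolio_mad, portfolio_mean_return, "ro", ms=10)

Re-using the same AMPL instance this way avoids re-reading the model file on every iteration; only the parameter value is updated before each solve.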