import os
import time
import pathlib
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
import torch
import yfinance as yf
from scipy.ndimage import shift

import tsai
from tsai.all import *
import wandb
from fastai.callback.wandb import *

project = 'ts-rnn'
# Get the data for AAPL by specifying the ticker, start date, and end date


# Multi day dataframe
poll_freq = '1m'
end = datetime.today().date()-timedelta(days=4)
start = end-timedelta(days=7)
tickers = ['AAPL']
data_1m = yf.download(tickers=tickers, end=end, start=start, interval=poll_freq)
data_1m = data_1m.sort_index()
data_1m.index = data_1m.index.tz_convert(None)

data_1m['ts'] = data_1m.index
row_1 = data_1m.iloc[0]

data_1m['tsHour'] = data_1m.apply(lambda row: row['ts'].to_pydatetime().hour , axis=1)
data_1m['tsMinute'] = data_1m.apply(lambda row: row['ts'].to_pydatetime().minute , axis=1)
data_1m['tsSecond'] = data_1m.apply(lambda row: row['ts'].to_pydatetime().second , axis=1)
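
# Aside (equivalent vectorized form): the DatetimeIndex exposes these
# components directly, which avoids the per-row apply above.
# data_1m['tsHour'] = data_1m.index.hour
# data_1m['tsMinute'] = data_1m.index.minute
# data_1m['tsSecond'] = data_1m.index.second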

start_dt = pd.Timestamp(year=row_1['ts'].year, month=row_1['ts'].month, day=row_1['ts'].day, hour=9, minute=30, second=0)
data_1m = add_datepart(data_1m, 'ts')
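# add_datepart (fastai) expands 'ts' into calendar features; a quick check of
# the generated columns (exact names can vary across fastai versions):
print([c for c in data_1m.columns if c.startswith('ts')])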

# feature eng

# day changes
def is_new_day(this_day, last_day):
    if pd.isnull(this_day):
        return True
    
    if pd.isnull(last_day):
        return True
    
    if last_day != this_day:
        return True
    
    return False

def create_template_row(df, fixed_val=np.nan):
    return {col: fixed_val for col in df.columns}

def offset_dateindex(ts, interval, units):
    if interval == '1m':
        return ts + pd.DateOffset(minutes=units)
    if interval == '2m':
        return ts + pd.DateOffset(minutes=2*units)
    raise ValueError(f'Unsupported interval: {interval}')
    
def insert_end_of_day_rows(df, first_daily_obs, end_day_val):
    new_df = [df]
    for obs_idx in first_daily_obs:
        first_obs = df.iloc[obs_idx]
        new_line_idx = offset_dateindex(
            ts=first_obs.name, 
            interval=poll_freq,
            units=-1
        )
        new_line_template = create_template_row(
            df, fixed_val=end_day_val
        )
        new_df.append(
            pd.DataFrame(new_line_template, index=[new_line_idx])
        )
    
    return pd.concat(new_df, axis=0).sort_index()
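
# Quick sanity check on a toy frame (hypothetical data): a sentinel row filled
# with end_day_val should appear one interval before each day's first bar.
toy = pd.DataFrame(
    {'Close': [1., 2., 3., 4.]},
    index=pd.to_datetime(['2020-12-22 09:30', '2020-12-22 09:31',
                          '2020-12-23 09:30', '2020-12-23 09:31'])
)
print(insert_end_of_day_rows(df=toy, first_daily_obs=[2], end_day_val=0.))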

days = data_1m[['tsYear', 'tsMonth', 'tsDay']].drop_duplicates()
days.iloc[-2]
tsYear     2020
tsMonth      12
tsDay        23
Name: 2020-12-23 14:30:00, dtype: int64

data_1m['tsLastDay'] = data_1m['tsDay'].shift(1)
data_1m['isNewDay'] = data_1m.apply(
    lambda x:
    is_new_day(this_day=x['tsDay'], last_day=x['tsLastDay']), axis=1
)

Notes: there were issues installing tsai; both sktime and tsai were installed from forks. TODO: add upstream remotes to both forks to keep them up to date.

Plan

  1. Self-supervised pre-training: use the previous minutes to predict the next minute's average (a minimal sliding-window sketch follows the lists below).
  2. Obtain labeled segments.

Use cases

  • predict reversals at the next timestamp
  • predict the future trend
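
As a sketch of step 1, tsai's SlidingWindow pairs each window of past values with the value that follows it (toy series here; the real pipeline wires this through create_df below):

# windows of the past 3 steps predict the value 1 step ahead
toy_series = np.arange(10, dtype=float)
Xw, yw = SlidingWindow(3, horizon=1)(toy_series)
print(Xw[0], yw[0])  # first window covers steps 0..2 -> target 3.0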

class ModelRun:
    """Work-in-progress wrapper for loading and saving model phases."""
    def __init__(self, checkpoint):
        self.checkpoint = checkpoint
        self.savetodir = None
        self.savetoname = None
        self.dls = self.get_dls()
        self.learner = self.load()
    
    
    def get_dls(self):
        """TODO (stub): build or fetch the dataloaders for this checkpoint."""
        pass
    
    
    def load(self):
        """TODO (stub)."""
        pass
    
    
    def save(self):
        """TODO (stub)."""
        pass
    
    
    def load_existing_checkpoint(self, name, fp, dls, cbs=None):
        """Loads an existing model checkpoint."""

        model = InceptionTime(dls.vars, dls.c)
        learn = Learner(
            dls, model,
            metrics=[mae, mse], 
            cbs=cbs
        )

        learn.model_dir = learn.model_dir/name
        return learn.load(fp)
    
    
    def save_training_run(self, name, fp, dls, learn):
        learn.model_dir = learn.model_dir/name
        learn.save(fp)


def create_df(X, splits, window_length, horizon, tfms, train_bs, valid_bs):
    """Builds sliding-window datasets and dataloaders from a 1-D series."""
    X1, y1 = SlidingWindow(window_length, horizon=horizon)(X)
    # windowing shortens the usable range, so trim the tail of the valid split
    splits1 = (L(splits[0]), L(splits[1][:-window_length]))
    ds = TSDatasets(X=X1, y=y1, tfms=tfms, splits=splits1)
    dls = TSDataLoaders.from_dsets(ds.train, ds.valid, bs=[train_bs, valid_bs], shuffle_train=False)
    return ds, dls, X1, y1, splits1


def create_base_learner(fp, dls):
    """Initializes a model with randomized weights."""
    print('create_base_learner', fp)
    model = InceptionTime(dls.vars, dls.c)
    learn = Learner(
        dls, model, metrics=[mae, mse]
    )
    return learn.save(fp)


def load_existing_checkpoint(fp, dls, cbs=None):
    """Loads and existing model phase"""
    print('load_existing_checkpoint', fp)
    model = InceptionTime(dls.vars, dls.c)
    learn = Learner(
        dls, model,
        metrics=[mae, mse], 
        cbs=cbs
    )

    return learn.load(fp)


def save_training_run(fp, dls, learn):
    """Saves a trained model and records its dataloaders in the global dlss registry."""
    print('save_training_run', fp)
    dlss[fp] = dls  # global map: checkpoint path -> dataloaders
    learn.save(fp)
    print('save_training_run success')
    

def save_dataloader(model_dlss, model_id, dls_fp, dls):
    """Saves data loader."""
    if Path(dls_fp).exists():
        return
    
    torch.save(dls, dls_fp)
    model_dlss[model_id] = dls_fp
    
def load_dataloader(model_dlss, dls_fp):
    """Loads a dataloader saved by save_dataloader."""
    if not Path(dls_fp).exists():
        return None
    return torch.load(dls_fp)

torch.save(obj=dls, f=Path('data/ts-rnn/dls.pk'))
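# To restore the saved dataloaders later (same path as above):
# dls = torch.load(Path('data/ts-rnn/dls.pk'))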

# separate ds initialization
X_feature = 'Close'
y_feature = 'Close'

# y_offset=1 means X(t) -> y = X(t+1); y_offset=2 means X(t) -> y = X(t+2); ...
y_offset, window_length, horizon = 1, 1, 1
tfms  = [None, [ToFloat(), ToNumpyTensor()]]
train_bs, valid_bs = 64, 128

training_end_ts = datetime(
    days.iloc[-2]['tsYear'], days.iloc[-2]['tsMonth'], days.iloc[-2]['tsDay'],
    23, 59, 0
)  # training data runs up to and including this timestamp
reqd_cols = list(dict.fromkeys([X_feature, y_feature, 'isNewDay']))  # dedup, keep order
end_day_val = 0.  # TODO: experiment with different values
df = data_1m[reqd_cols]

print('Training end timestamp', training_end_ts)
noise_reduction_threshold = 3  # decimals
first_daily_obs = list(reversed(np.where(df['isNewDay'])[0]))
df = insert_end_of_day_rows(
    df=df, 
    first_daily_obs=first_daily_obs,
    end_day_val=end_day_val
)

del df['isNewDay']
cond = (df.index <= training_end_ts)
X = df[X_feature].values
# scipy's shift with a positive value lags the series (y[t] = X[t - y_offset]),
# so shift by -y_offset to get the intended lead y[t] = X[t + y_offset]
y = shift(input=df[y_feature].values, shift=-y_offset, cval=np.nan)

train_idx = np.where(cond)[0]
valid_idx = np.where(~cond)[0]

splits = (list(train_idx), list(valid_idx))
print('training:\t', len(train_idx), '\ntesting:\t', len(valid_idx))

ds, dls, X1, y1, splits1 = create_df(
    X=X,
    splits=splits,
    window_length=window_length, 
    horizon=horizon,
    tfms=tfms,
    train_bs=train_bs, 
    valid_bs=valid_bs
)

config_log = {
    "poll_freq": poll_freq,
    "ds.request_end": end,
    "ds.request_start": start,
    "tickers": tickers,
    "ds.actual_start": start_dt,
    "X_feature": X_feature,
    "y_feature": y_feature,
    "training_end_ts": training_end_ts,
    "noise_reduction_threshold": noise_reduction_threshold,
    "first_daily_obs": first_daily_obs,
    "window_length": window_length,
    "horizon": horizon,
    "tfms": tfms,
    "train_bs": train_bs,
    "valid_bs": valid_bs,
    "y_offset": y_offset
}
Training end timestamp 2020-12-23 23:59:00
training:	 1171 
testing:	 212

# initialize phase
p0_name = "single-dsconfig-training-phase0_sweep"
next_phase_name = "single-dsconfig-training-phase1_sweep"
p = pathlib.Path('models')/next_phase_name
p.mkdir(parents=True, exist_ok=True)

N = 10
phase_0s = []
dlss = {}
logs = {}
for i in range(N):
    fp_in = os.path.join(p0_name, str(i))
    fp_out = create_base_learner(fp=fp_in, dls=dls)
    phase_0s.append(fp_in)
    dlss[fp_in] = dls
    logs[fp_in] = config_log
create_base_learner single-dsconfig-training-phase0_sweep/0
create_base_learner single-dsconfig-training-phase0_sweep/1
create_base_learner single-dsconfig-training-phase0_sweep/2
create_base_learner single-dsconfig-training-phase0_sweep/3
create_base_learner single-dsconfig-training-phase0_sweep/4
create_base_learner single-dsconfig-training-phase0_sweep/5
create_base_learner single-dsconfig-training-phase0_sweep/6
create_base_learner single-dsconfig-training-phase0_sweep/7
create_base_learner single-dsconfig-training-phase0_sweep/8
create_base_learner single-dsconfig-training-phase0_sweep/9

p0_config = {
    "name": p0_name,
    "method": "grid",
    "metric": {
        "name": "mse",
        "goal": "minimize"
    },
    "parameters": {
            "checkpoint": {
                "values": phase_0s
            },
            "n_epoch": {
                "values": [1, 3, 5, 10, 20, 40, 80]
            },
            "lr_max": {
                "values": [1e-1 ,1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
            },
            "div": {
                "values": [10., 25., 50., 75.]
            }
    }
}
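
# Grid search enumerates every combination; a quick size check:
print(len(phase_0s) * 7 * 7 * 4)  # 10 checkpoints x 7 n_epochs x 7 lrs x 4 divs = 1960 runs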

sweep_id = wandb.sweep(p0_config, project=project)
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Create sweep with ID: vbyp7u8r
Sweep URL: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r

def get_config(phase):
    name = "single-dsconfig-training-phase{}_sweep".format(phase)
    phasepath = Path('models')/name
    checkpoints = [str(c) for c in phasepath.ls()]  # wandb configs want plain strings
    config = {
        "name": name,
        "method": "random",
        "metric": {
            "name": "mse",
            "goal": "minimize"
        },
        "parameters": {
                "checkpoint": {
                    "values": checkpoints
                },
                "n_epoch": {
                    "values": [3, 5, 10, 20]
                },
                "lr_max": {
                    "values": [1e-1 ,1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
                },
                "div": {
                    "values": [10., 25., 50., 75.]
                }
        }
    }
    
    return config
    

def train():
    default_config = {
        "checkpoint": phase_0s[0],
        "n_epoch": 3,
        "lr_max": 1e-06,
        "div": 50.
    }
    
    wandb.init(config={**default_config, **config_log})
    config = wandb.config
    dls = dlss[config.checkpoint]  # use the sweep-provided checkpoint, not the default
    
    # TODO: find a better way to manage next phase dirname
    learn = load_existing_checkpoint(
        fp=config.checkpoint, 
        dls=dls,
        cbs=[WandbCallback(
            log_model=True, 
            log_preds=True,
            n_preds=len(valid_idx),
        ), SaveModelCallback()]
    )
    
    learn.fit_one_cycle(
        config.n_epoch, 
        lr_max=config.lr_max,
        div=config.div
    )
    outfp = os.path.join(next_phase_name, wandb.run.name)
    save_training_run(
        fp=outfp,
        learn=learn,
        dls=dls
    )
    
    preds,targs = learn.get_preds()
    preds_df = pd.DataFrame(preds.flatten(), columns=['preds'])
    targs_df = pd.DataFrame(targs.flatten(), columns=['targs'])
    results = pd.concat([preds_df, targs_df], axis=1)
    wandb.log(
        {
            "predictions": results.plot()
        }
    )
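
# wandb.agent keeps pulling sweep configurations until stopped; to cap the
# number of runs one agent executes, pass count, e.g.:
# wandb.agent(sweep_id=sweep_id, function=train, count=5)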

wandb.agent(sweep_id=sweep_id, function=train)
wandb: Agent Starting Run: enscz6k1 with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 0.0001
wandb: 	n_epoch: 5
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run driven-sweep-24 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/enscz6k1
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_200741-enscz6k1

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
epoch train_loss valid_loss mae mse time
0 16851.765625 17094.232422 130.742371 17094.232422 00:01
1 16785.460938 17414.597656 131.963181 17414.597656 00:01
2 16717.101562 17452.841797 132.108109 17452.841797 00:01
3 16660.125000 17421.761719 131.988907 17421.761719 00:01
4 16621.132812 17386.763672 131.854050 17386.763672 00:01
Better model found at epoch 0 with valid_loss value: 17094.232421875.
wandb: Adding directory to artifact (/tmp/tmp9oah3rba)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/driven-sweep-24
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 7270
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200741-enscz6k1/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200741-enscz6k1/logs/debug-internal.log

Run summary:


epoch 5
train_loss 16621.13281
raw_loss 16067.28613
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94995
eps_0 1e-05
_step 95
_runtime 12
_timestamp 1609380473
valid_loss 17386.76367
mae 131.85405
mse 17386.76367

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▂▁▂▅▆▇███▇▆▇▇▇▇█▇▇▆▇▇▇▇▇▇▆▆▆▇▇▇▇▇▆▆▆▆▇▇▇
raw_loss ▃▃▄██▇▇▇▃▃▄██▇▇▇▂▂▃▇▇▆▇▇▂▃▃▆▇▆▇▇▁▂▃▆▇▆▇▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▆▇███████▇▇▇▇▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ███▇▆▄▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▂▂▂▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▅▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇▇▇▇█
_timestamp ▁▂▂▂▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▅▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇▇▇▇█
valid_loss ▁▇█▇▇
mae ▁▇█▇▇
mse ▁▇█▇▇

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Sweep Agent: Waiting for job.
wandb: Job received.
wandb: Agent Starting Run: 59z9gu7r with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 0.0001
wandb: 	n_epoch: 10
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run graceful-sweep-25 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/59z9gu7r
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_200814-59z9gu7r

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16863.748047 17122.673828 130.850800 17122.673828 00:01
1 16818.023438 17425.871094 132.005035 17425.871094 00:01
2 16754.675781 17453.398438 132.109451 17453.398438 00:01
3 16683.044922 17422.099609 131.991074 17422.099609 00:01
4 16613.001953 17367.019531 131.782394 17367.019531 00:01
5 16550.369141 17261.785156 131.382187 17261.785156 00:01
6 16496.580078 17283.605469 131.465286 17283.605469 00:01
7 16453.351562 17280.964844 131.455246 17280.964844 00:01
8 16420.773438 17260.287109 131.376450 17260.287109 00:01
9 16397.923828 17260.642578 131.377579 17260.642578 00:01
Better model found at epoch 0 with valid_loss value: 17122.673828125.
wandb: Adding directory to artifact (/tmp/tmpa70l28i0)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/graceful-sweep-25
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 7609
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200814-59z9gu7r/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200814-59z9gu7r/logs/debug-internal.log

Run summary:


epoch 10
train_loss 16397.92383
raw_loss 15865.48047
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94999
eps_0 1e-05
_step 190
_runtime 21
_timestamp 1609380515
valid_loss 17260.64258
mae 131.37758
mse 17260.64258

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▁▂▆██▇▇█▇▇▇█▇▇▇▇▇▆▇▇▆▆▆▇▆▆▆▆▆▅▆▆▅▅▆▆▅▅▅▆
raw_loss ▂▄██▃▇█▇▂▄▇▇▃▇▇▇▁▃▇▇▂▆▆▆▁▇▆▆▂▆▆▆▂▆▆▆▂▆▆▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▇▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▆▄▃▂▂▁▁▁▁▁▁▂▂▂▂▂▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▂▂▂▂▂▂▂▂▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇▇██
_timestamp ▁▁▁▁▂▂▂▂▂▂▂▂▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇▇██
valid_loss ▁▇█▇▆▄▄▄▄▄
mae ▁▇█▇▆▄▄▄▄▄
mse ▁▇█▇▆▄▄▄▄▄

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: q5qzglvw with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 0.0001
wandb: 	n_epoch: 20
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run honest-sweep-26 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/q5qzglvw
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_200842-q5qzglvw

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16867.824219 17132.888672 130.889725 17132.888672 00:01
1 16839.199219 17433.878906 132.034576 17433.878906 00:01
2 16801.939453 17463.720703 132.146637 17463.720703 00:01
3 16752.289062 17461.597656 132.138443 17461.597656 00:01
4 16690.041016 17409.464844 131.942780 17409.464844 00:01
5 16621.437500 17337.548828 131.670731 17337.548828 00:01
6 16552.236328 17287.386719 131.480652 17287.386719 00:01
7 16486.255859 17292.101562 131.497726 17292.101562 00:01
8 16424.000000 17217.488281 131.211563 17217.488281 00:01
9 16366.322266 16441.832031 128.220978 16441.832031 00:01
10 16314.892578 17180.570312 131.063293 17180.570312 00:01
11 16267.355469 17145.031250 130.924408 17145.031250 00:01
12 16224.870117 17115.468750 130.808517 17115.468750 00:01
13 16187.346680 17111.597656 130.790070 17111.597656 00:01
14 16155.184570 17069.980469 130.627670 17069.980469 00:01
15 16128.502930 17074.386719 130.641922 17074.386719 00:01
16 16107.181641 17059.001953 130.581757 17059.001953 00:02
17 16090.777344 17060.320312 130.586044 17060.320312 00:01
18 16078.731445 17060.767578 130.587402 17060.767578 00:01
19 16070.275391 17061.562500 130.590424 17061.562500 00:01
Better model found at epoch 0 with valid_loss value: 17132.888671875.
Better model found at epoch 9 with valid_loss value: 16441.83203125.
wandb: Adding directory to artifact (/tmp/tmpy0es5poq)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/honest-sweep-26
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 8188
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200842-q5qzglvw/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200842-q5qzglvw/logs/debug-internal.log

Run summary:


epoch 20
train_loss 16070.27539
raw_loss 15582.51172
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.95
eps_0 1e-05
_step 380
_runtime 41
_timestamp 1609380563
valid_loss 17061.5625
mae 130.59042
mse 17061.5625

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▁▇████▇▇▇▇▆▇▇▆▆▆▅▆▅▅▄▅▄▅▄▄▄▄▃▄▃▄▃▄▃▄▃▄▃▃
raw_loss ▄█▃▇▂▇▃█▃▇▃▆▁▆▂▆▂▆▂▆▂▆▆▆▂▆▂▆▂▅▆▅▆▆▂▅▂▅▆▃
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▆▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▅▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇▇██
_timestamp ▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇▇██
valid_loss ▆████▇▇▇▆▁▆▆▆▆▅▅▅▅▅▅
mae ▆████▇▇▇▆▁▆▆▆▆▅▅▅▅▅▅
mse ▆████▇▇▇▆▁▆▆▆▆▅▅▅▅▅▅

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: ya8ogxgc with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 0.0001
wandb: 	n_epoch: 40
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run glorious-sweep-27 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/ya8ogxgc
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_200935-ya8ogxgc

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16868.968750 17136.835938 130.904785 17136.835938 00:01
1 16848.242188 17440.337891 132.058838 17440.337891 00:01
2 16824.509766 17474.201172 132.184906 17474.201172 00:01
3 16797.003906 17489.435547 132.232437 17489.435547 00:01
4 16762.984375 17436.039062 132.027267 17436.039062 00:01
5 16720.851562 17305.435547 131.537766 17305.435547 00:01
6 16670.619141 17322.958984 131.612320 17322.958984 00:01
7 16614.996094 17159.644531 130.992523 17159.644531 00:01
8 16554.523438 17280.388672 131.454147 17280.388672 00:01
9 16493.986328 17240.910156 131.302536 17240.910156 00:01
10 16432.507812 17218.324219 131.215179 17218.324219 00:01
11 16371.637695 17195.798828 131.124664 17195.798828 00:01
12 16311.836914 17155.773438 130.968170 17155.773438 00:01
13 16253.294922 17144.005859 130.913879 17144.005859 00:01
14 16196.915039 16842.351562 129.750626 16842.351562 00:01
15 16141.201172 17083.568359 130.662430 17083.568359 00:01
16 16086.743164 16850.421875 129.759445 16850.421875 00:01
17 16034.440430 17008.308594 130.355576 17008.308594 00:01
18 15984.509766 16981.875000 130.236404 16981.875000 00:01
19 15935.457031 17011.630859 130.336441 17011.630859 00:01
20 15888.824219 16945.287109 130.072052 16945.287109 00:01
21 15844.999023 16983.332031 130.198898 16983.332031 00:01
22 15803.041016 16874.208984 129.768845 16874.208984 00:01
23 15763.515625 16790.880859 129.435135 16790.880859 00:01
24 15726.163086 16898.494141 129.841171 16898.494141 00:01
25 15691.458008 16870.443359 129.722198 16870.443359 00:01
26 15659.495117 16820.820312 129.534393 16820.820312 00:01
27 15630.248047 16775.466797 129.350357 16775.466797 00:01
28 15603.407227 16809.470703 129.479416 16809.470703 00:01
29 15579.450195 16743.490234 129.220825 16743.490234 00:01
30 15558.423828 16818.669922 129.505325 16818.669922 00:01
31 15540.255859 16741.880859 129.205276 16741.880859 00:01
32 15524.796875 16785.113281 129.369064 16785.113281 00:01
33 15511.831055 16712.759766 129.088699 16712.759766 00:01
34 15501.209961 16698.632812 129.032715 16698.632812 00:01
35 15492.711914 16695.896484 129.020630 16695.896484 00:01
36 15486.111328 16691.992188 129.004913 16691.992188 00:01
37 15481.150391 16688.164062 128.989838 16688.164062 00:01
38 15477.551758 16686.958984 128.985092 16686.958984 00:01
39 15475.038086 16686.474609 128.983246 16686.474609 00:01
Better model found at epoch 0 with valid_loss value: 17136.8359375.
Better model found at epoch 14 with valid_loss value: 16842.3515625.
Better model found at epoch 23 with valid_loss value: 16790.880859375.
Better model found at epoch 27 with valid_loss value: 16775.466796875.
Better model found at epoch 29 with valid_loss value: 16743.490234375.
Better model found at epoch 31 with valid_loss value: 16741.880859375.
Better model found at epoch 33 with valid_loss value: 16712.759765625.
Better model found at epoch 34 with valid_loss value: 16698.6328125.
Better model found at epoch 35 with valid_loss value: 16695.896484375.
Better model found at epoch 36 with valid_loss value: 16691.9921875.
Better model found at epoch 37 with valid_loss value: 16688.1640625.
Better model found at epoch 38 with valid_loss value: 16686.958984375.
Better model found at epoch 39 with valid_loss value: 16686.474609375.
wandb: Adding directory to artifact (/tmp/tmpz4xa287l)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/glorious-sweep-27
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 9247
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200935-ya8ogxgc/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_200935-ya8ogxgc/logs/debug-internal.log

Run summary:


epoch 40
train_loss 15475.03809
raw_loss 15026.24512
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.95
eps_0 1e-05
_step 760
_runtime 79
_timestamp 1609380654
valid_loss 16686.47461
mae 128.98325
mse 16686.47461

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▅▇███▇▇▇▇▆▆▆▅▅▅▅▄▄▄▃▃▃▃▃▂▂▂▂▂▁▂▁▁▁▁▁▁▁▁▁
raw_loss █▄▂▇▃▇▃▂▇▃▆▆▂▅▂▆▅▁▅▅▅▄▄▅▄▄▄▄▄▃▃▄▄▃▄▃▄▄▃▁
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▆▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▅▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇███
_timestamp ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇███
valid_loss ▅████▆▇▅▆▆▆▅▅▅▂▄▂▄▄▄▃▄▃▂▃▃▂▂▂▁▂▁▂▁▁▁▁▁▁▁
mae ▅████▇▇▅▆▆▆▆▅▅▃▅▃▄▄▄▃▄▃▂▃▃▂▂▂▂▂▁▂▁▁▁▁▁▁▁
mse ▅████▆▇▅▆▆▆▅▅▅▂▄▂▄▄▄▃▄▃▂▃▃▂▂▂▁▂▁▂▁▁▁▁▁▁▁

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: 8ejt9cr0 with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 0.0001
wandb: 	n_epoch: 80
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run young-sweep-28 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/8ejt9cr0
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201108-8ejt9cr0

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16869.257812 17137.013672 130.905457 17137.013672 00:01
1 16850.804688 17441.402344 132.062790 17441.402344 00:01
2 16832.667969 17477.330078 132.196335 17477.330078 00:01
3 16813.412109 17503.720703 132.281219 17503.720703 00:01
4 16792.462891 17490.791016 132.222321 17490.791016 00:01
5 16769.287109 17451.078125 132.078705 17451.078125 00:01
6 16743.125000 17436.800781 132.027786 17436.800781 00:01
7 16713.744141 17420.609375 131.975861 17420.609375 00:01
8 16679.734375 17381.044922 131.829483 17381.044922 00:01
9 16642.068359 17375.595703 131.811722 17375.595703 00:01
10 16599.986328 17334.763672 131.659607 17334.763672 00:01
11 16554.986328 17331.441406 131.647339 17331.441406 00:01
12 16508.843750 17324.857422 131.622665 17324.857422 00:01
13 16461.917969 17276.437500 131.437561 17276.437500 00:01
14 16413.794922 17250.800781 131.340363 17250.800781 00:01
15 16364.436523 17204.869141 131.163635 17204.869141 00:01
16 16313.495117 17174.109375 131.044647 17174.109375 00:01
17 16261.653320 17224.951172 131.232269 17224.951172 00:01
18 16209.461914 17011.435547 130.402603 17011.435547 00:01
19 16154.955078 17082.398438 130.667343 17082.398438 00:01
20 16100.580078 17011.433594 130.377579 17011.433594 00:01
21 16045.573242 17017.248047 130.387314 17017.248047 00:01
22 15990.035156 16976.095703 130.221558 16976.095703 00:01
23 15934.102539 16887.833984 129.874191 16887.833984 00:01
24 15877.906250 16911.458984 129.940872 16911.458984 00:01
25 15822.751953 16914.113281 129.936508 16914.113281 00:01
26 15768.279297 16959.685547 130.088623 16959.685547 00:01
27 15713.018555 16998.724609 130.212753 16998.724609 00:01
28 15657.997070 16798.298828 129.425400 16798.298828 00:01
29 15603.409180 16786.078125 129.357727 16786.078125 00:01
30 15548.836914 16703.484375 129.030258 16703.484375 00:01
31 15494.398438 16708.650391 129.022079 16708.650391 00:01
32 15440.585938 16807.248047 129.390701 16807.248047 00:01
33 15388.107422 16435.195312 127.934128 16435.195312 00:01
34 15335.736328 16462.425781 128.016846 16462.425781 00:01
35 15284.082031 16694.603516 128.915863 16694.603516 00:01
36 15233.023438 15922.021484 125.921989 15922.021484 00:01
37 15182.677734 16471.378906 128.111618 16471.378906 00:01
38 15133.390625 16446.744141 128.022141 16446.744141 00:01
39 15084.399414 16454.222656 128.066132 16454.222656 00:01
40 15036.383789 16221.137695 127.166962 16221.137695 00:01
41 14989.958008 16126.601562 126.818680 16126.601562 00:01
42 14944.378906 16356.822266 127.715233 16356.822266 00:01
43 14899.675781 16018.284180 126.385941 16018.284180 00:01
44 14856.560547 15812.881836 125.601227 15812.881836 00:01
45 14814.570312 16360.370117 127.758057 16360.370117 00:01
46 14773.966797 15624.066406 124.857079 15624.066406 00:01
47 14734.716797 15811.144531 125.600220 15811.144531 00:01
48 14696.546875 15703.860352 125.172295 15703.860352 00:01
49 14659.550781 15820.836914 125.642372 15820.836914 00:01
50 14624.221680 16087.708984 126.694527 16087.708984 00:01
51 14590.236328 16414.070312 127.973030 16414.070312 00:01
52 14557.952148 15773.204102 125.446091 15773.204102 00:01
53 14527.762695 15752.934570 125.371223 15752.934570 00:01
54 14499.076172 15874.183594 125.853745 15874.183594 00:01
55 14471.650391 15869.708984 125.839699 15869.708984 00:01
56 14445.521484 15778.150391 125.471039 15778.150391 00:01
57 14420.866211 15710.672852 125.212860 15710.672852 00:01
58 14397.514648 15934.793945 126.101578 15934.793945 00:01
59 14375.751953 15700.195312 125.164146 15700.195312 00:02
60 14355.648438 15536.324219 124.518661 15536.324219 00:01
61 14337.143555 16217.024414 127.220970 16217.024414 00:01
62 14320.227539 15664.918945 125.032066 15664.918945 00:01
63 14304.718750 15597.861328 124.767197 15597.861328 00:01
64 14290.696289 15615.498047 124.836067 15615.498047 00:01
65 14277.873047 15618.549805 124.847084 15618.549805 00:01
66 14266.379883 15637.005859 124.922401 15637.005859 00:01
67 14256.192383 15610.528320 124.817131 15610.528320 00:01
68 14247.232422 15589.907227 124.736023 15589.907227 00:01
69 14239.430664 15618.666016 124.850433 15618.666016 00:01
70 14232.706055 15602.001953 124.784561 15602.001953 00:01
71 14226.975586 15599.855469 124.775902 15599.855469 00:01
72 14222.163086 15606.209961 124.801208 15606.209961 00:01
73 14218.184570 15606.207031 124.801468 15606.207031 00:01
74 14214.954102 15607.133789 124.805130 15607.133789 00:01
75 14212.393555 15610.329102 124.817856 15610.329102 00:01
76 14210.413086 15612.440430 124.826271 15612.440430 00:01
77 14208.930664 15613.489258 124.830475 15613.489258 00:01
78 14207.857422 15614.208984 124.833344 15614.208984 00:01
79 14207.110352 15614.526367 124.834625 15614.526367 00:01
Better model found at epoch 0 with valid_loss value: 17137.013671875.
Better model found at epoch 18 with valid_loss value: 17011.435546875.
Better model found at epoch 20 with valid_loss value: 17011.43359375.
Better model found at epoch 22 with valid_loss value: 16976.095703125.
Better model found at epoch 23 with valid_loss value: 16887.833984375.
Better model found at epoch 28 with valid_loss value: 16798.298828125.
Better model found at epoch 29 with valid_loss value: 16786.078125.
Better model found at epoch 30 with valid_loss value: 16703.484375.
Better model found at epoch 33 with valid_loss value: 16435.1953125.
Better model found at epoch 36 with valid_loss value: 15922.021484375.
Better model found at epoch 44 with valid_loss value: 15812.8818359375.
Better model found at epoch 46 with valid_loss value: 15624.06640625.
Better model found at epoch 60 with valid_loss value: 15536.32421875.
wandb: Adding directory to artifact (/tmp/tmpxgs820sv)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/young-sweep-28
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 11266
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201108-8ejt9cr0/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201108-8ejt9cr0/logs/debug-internal.log

Run summary:


epoch 80
train_loss 14207.11035
raw_loss 13846.10547
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.95
eps_0 1e-05
_step 1520
_runtime 158
_timestamp 1609380826
valid_loss 15614.52637
mae 124.83463
mse 15614.52637

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ████▇▇▇▇▇▆▆▆▅▅▅▄▄▄▄▄▃▃▃▂▂▂▂▂▂▂▁▁▁▁▁▁▁▁▁▁
raw_loss ██▅▇█▇▇▅▇▇▄▆▆▆▆▃▅▅▃▅▃▄▄▂▄▄▂▄▂▄▃▁▄▄▁▄▂▃▃▂
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▆▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▅▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_timestamp ▁▁▁▁▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
valid_loss ▇████▇▇▇▇▆▆▆▆▆▆▅▆▄▂▄▃▄▂▁▂▃▂▂▂▂▁▁▁▁▁▁▁▁▁▁
mae ▇████▇▇▇▇▆▆▆▆▆▅▅▅▄▂▄▃▄▂▁▂▃▂▂▂▂▁▁▁▁▁▁▁▁▁▁
mse ▇████▇▇▇▇▆▆▆▆▆▆▅▆▄▂▄▃▄▂▁▂▃▂▂▂▂▁▁▁▁▁▁▁▁▁▁

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: 0ikixfvf with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 1
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run exalted-sweep-29 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/0ikixfvf
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201420-0ikixfvf

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16874.994141 17144.140625 130.932632 17144.140625 00:01
Better model found at epoch 0 with valid_loss value: 17144.140625.
wandb: Adding directory to artifact (/tmp/tmppommv0iy)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/exalted-sweep-29
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 15229
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201420-0ikixfvf/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201420-0ikixfvf/logs/debug-internal.log

Run summary:


epoch 1
train_loss 16874.99414
raw_loss 16366.72559
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94879
eps_0 1e-05
_step 19
_runtime 5
_timestamp 1609380865
valid_loss 17144.14062
mae 130.93263
mse 17144.14062

Run history:


epoch ▁▁▂▂▃▃▃▄▄▅▅▅▆▆▆▇▇██
train_loss ▂▁▁▂▂▂▄▅▅▆▆▇▇▇▇████
raw_loss ▂▁▂▃▃▃▇█▇██▇▆▇▇▇▇▇▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▄▆████▇▇▆▅▄▄▃▂▂▁▁
mom_0 █▇▅▃▁▁▁▁▂▂▃▄▅▅▆▇▇██
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▂▂▂▃▃▄▄▄▅▅▅▆▆▇▇▇██
_runtime ▁▁▁▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▆█
_timestamp ▁▁▁▃▃▃▃▃▃▃▃▃▃▃▃▃▃▃▆█
valid_loss
mae
mse

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Sweep Agent: Waiting for job.
wandb: Job received.
wandb: Agent Starting Run: zlt8ycjo with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 3
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run volcanic-sweep-30 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/zlt8ycjo
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201442-zlt8ycjo

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16876.732422 17141.242188 130.921616 17141.242188 00:01
1 16862.562500 17441.089844 132.061646 17441.089844 00:01
2 16851.972656 17481.369141 132.211151 17481.369141 00:01
Better model found at epoch 0 with valid_loss value: 17141.2421875.
wandb: Adding directory to artifact (/tmp/tmpuzte0ht8)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/volcanic-sweep-30
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 15376
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201442-zlt8ycjo/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201442-zlt8ycjo/logs/debug-internal.log

Run summary:


epoch 3
train_loss 16851.97266
raw_loss 16339.2666
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94987
eps_0 1e-05
_step 57
_runtime 8
_timestamp 1609380891
valid_loss 17481.36914
mae 132.21115
mse 17481.36914

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▂▁▁▂▂▅▅▆▇▇▇███▇▇▇▆▆▇▇▇▇█████▇▇▇▇▇▇▇▇▇███
raw_loss ▂▁▂▃▃█▇█▇▆▇▇▇▄▁▂▃▃▇▇█▇▆▇▇▇▄▂▂▃▃▇███▆▇▇▇▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▃▅▅▇▇███████▇▇▇▇▆▆▆▅▅▅▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ███▇▆▅▄▂▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▁▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▁▁▁▁▁▁▂▂▂▃▃▃▃▃▃▃▃▃▃▅▅▅▆▆▆▆▆▆▆▆▆▆▆▇▇█
_timestamp ▁▁▁▁▁▁▁▁▁▁▂▂▂▃▃▃▃▃▃▃▃▃▃▅▅▅▆▆▆▆▆▆▆▆▆▆▆▇▇█
valid_loss ▁▇█
mae ▁▇█
mse ▁▇█

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Sweep Agent: Waiting for job.
wandb: Job received.
wandb: Agent Starting Run: pgypq5sb with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 5
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run sleek-sweep-31 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/pgypq5sb
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201509-pgypq5sb

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16878.900391 17145.847656 130.939163 17145.847656 00:01
1 16865.958984 17442.156250 132.065613 17442.156250 00:01
2 16850.605469 17480.759766 132.208832 17480.759766 00:01
3 16838.125000 17508.949219 132.301270 17508.949219 00:01
4 16829.718750 17522.125000 132.333313 17522.125000 00:01
Better model found at epoch 0 with valid_loss value: 17145.84765625.
wandb: Adding directory to artifact (/tmp/tmpbvvngakm)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/sleek-sweep-31
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 15619
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201509-pgypq5sb/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201509-pgypq5sb/logs/debug-internal.log

Run summary:


epoch 5
train_loss 16829.71875
raw_loss 16319.44043
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94995
eps_0 1e-05
_step 95
_runtime 12
_timestamp 1609380921
valid_loss 17522.125
mae 132.33331
mse 17522.125

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▂▁▂▄▆▇███▇▆▇▇▇███▇▇▇▇▇███▇▇▇▇▇██▇▇▇▇▇███
raw_loss ▂▂▃██▇▇█▂▂▃██▇▇▇▂▂▃██▆█▇▂▃▃▇█▆▇▇▁▃▃▇█▇▇▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▆▇███████▇▇▇▇▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ███▇▆▄▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▁▂▂▂▃▃▃▃▃▃▃▃▄▄▄▄▄▄▄▆▆▆▆▆▆▆▆▆▇▇▇▇▇▇▇█
_timestamp ▁▁▁▁▁▂▂▂▃▃▃▃▃▃▃▃▄▄▄▄▄▄▄▆▆▆▆▆▆▆▆▆▇▇▇▇▇▇▇█
valid_loss ▁▇▇██
mae ▁▇▇██
mse ▁▇▇██

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: v01sfw0a with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 10
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run dark-sweep-32 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/v01sfw0a
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201527-v01sfw0a

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16880.458984 17149.529297 130.953201 17149.529297 00:01
1 16873.070312 17444.021484 132.072510 17444.021484 00:01
2 16859.412109 17481.353516 132.211044 17481.353516 00:01
3 16843.810547 17505.541016 132.288071 17505.541016 00:01
4 16828.460938 17524.246094 132.338242 17524.246094 00:01
5 16814.417969 17506.425781 132.268372 17506.425781 00:02
6 16802.154297 17502.203125 132.252838 17502.203125 00:01
7 16792.259766 17501.347656 132.250580 17501.347656 00:01
8 16784.847656 17502.875000 132.256836 17502.875000 00:01
9 16779.656250 17504.652344 132.263718 17504.652344 00:01
Better model found at epoch 0 with valid_loss value: 17149.529296875.
wandb: Adding directory to artifact (/tmp/tmpbz9cf5j0)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/dark-sweep-32
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 15958
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201527-v01sfw0a/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201527-v01sfw0a/logs/debug-internal.log

Run summary:


epoch 10
train_loss 16779.65625
raw_loss 16275.87793
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.94999
eps_0 1e-05
_step 190
_runtime 22
_timestamp 1609380949
valid_loss 17504.65234
mae 132.26372
mse 17504.65234

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▁▂▆██▇▇██▇███▇███▇██▇▇███▇██▇▇▇█▇▇▇█▇▇▇█
raw_loss ▁▃██▂▇█▇▁▃▇▇▂▇█▇▁▃▇▇▂▇▆▇▁▇▇▇▂▇▆▇▃▇▇▇▂▇▆▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▇▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▆▄▃▂▂▁▁▁▁▁▁▂▂▂▂▂▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▂▂▂▂▂▂▂▂▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇███
_timestamp ▁▁▁▁▂▂▂▂▂▂▂▂▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▅▆▆▆▆▇▇▇▇▇███
valid_loss ▁▇▇███████
mae ▁▇▇███████
mse ▁▇▇███████

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: fc7aljx4 with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 20
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run vocal-sweep-33 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/fc7aljx4
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201556-fc7aljx4

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16880.945312 17150.736328 130.957809 17150.736328 00:01
1 16877.128906 17446.263672 132.080872 17446.263672 00:01
2 16870.166016 17484.619141 132.222946 17484.619141 00:01
3 16859.164062 17517.058594 132.332047 17517.058594 00:01
4 16845.341797 17536.710938 132.387405 17536.710938 00:01
5 16830.156250 17517.066406 132.307373 17517.066406 00:01
6 16814.980469 17503.625000 132.256241 17503.625000 00:01
7 16799.580078 17491.130859 132.209869 17491.130859 00:01
8 16784.806641 17481.343750 132.177948 17481.343750 00:01
9 16770.957031 17477.386719 132.164749 17477.386719 00:01
10 16758.294922 17472.787109 132.151215 17472.787109 00:01
11 16746.857422 17469.808594 132.140518 17469.808594 00:01
12 16736.849609 17466.707031 132.129959 17466.707031 00:01
13 16728.126953 17465.542969 132.126495 17465.542969 00:01
14 16720.771484 17463.621094 132.119690 17463.621094 00:01
15 16714.699219 17462.357422 132.115433 17462.357422 00:01
16 16709.875000 17462.132812 132.114914 17462.132812 00:01
17 16706.187500 17462.005859 132.114578 17462.005859 00:01
18 16703.482422 17461.890625 132.114212 17461.890625 00:01
19 16701.585938 17461.886719 132.114212 17461.886719 00:01
Better model found at epoch 0 with valid_loss value: 17150.736328125.
wandb: Adding directory to artifact (/tmp/tmp7qxo7viz)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/vocal-sweep-33
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 16537
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201556-fc7aljx4/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201556-fc7aljx4/logs/debug-internal.log

Run summary:


epoch 20
train_loss 16701.58594
raw_loss 16201.1748
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.95
eps_0 1e-05
_step 380
_runtime 42
_timestamp 1609380998
valid_loss 17461.88672
mae 132.11421
mse 17461.88672

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▁▇██████▇█▇███▇█▇█▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇
raw_loss ▃█▂▇▁▇▂█▃▇▃▆▁▇▂▇▃▇▃▆▃▇▇▇▂▇▃▇▃▇▇▇▇▇▃▇▃▇▇▄
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▆▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▅▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇▇██
_timestamp ▁▁▁▁▂▂▂▂▂▂▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇▇██
valid_loss ▁▆▇███▇▇▇▇▇▇▇▇▇▇▇▇▇▇
mae ▁▆▇███▇▇▇▇▇▇▇▇▇▇▇▇▇▇
mse ▁▆▇███▇▇▇▇▇▇▇▇▇▇▇▇▇▇

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Agent Starting Run: dhwflqpu with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 40
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run golden-sweep-34 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/dhwflqpu
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201646-dhwflqpu

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

epoch train_loss valid_loss mae mse time
0 16881.080078 17151.042969 130.958969 17151.042969 00:01
1 16878.455078 17446.785156 132.082809 17446.785156 00:01
2 16874.832031 17485.751953 132.226974 17485.751953 00:01
3 16869.564453 17525.572266 132.364563 17525.572266 00:01
4 16862.164062 17551.330078 132.447708 17551.330078 00:01
5 16852.714844 17542.667969 132.409348 17542.667969 00:01
6 16841.560547 17535.132812 132.377441 17535.132812 00:01
7 16828.923828 17515.777344 132.302673 17515.777344 00:01
8 16815.234375 17508.582031 132.276382 17508.582031 00:01
9 16800.615234 17495.964844 132.230637 17495.964844 00:01
10 16786.003906 17484.613281 132.192505 17484.613281 00:01
11 16771.501953 17472.380859 132.149429 17472.380859 00:01
12 16757.445312 17466.017578 132.127762 17466.017578 00:01
13 16743.726562 17458.269531 132.101227 17458.269531 00:01
14 16730.601562 17453.017578 132.082962 17453.017578 00:01
15 16718.101562 17449.166016 132.069824 17449.166016 00:01
16 16706.189453 17442.000000 132.044708 17442.000000 00:01
17 16694.677734 17441.875000 132.045258 17441.875000 00:01
18 16683.830078 17435.544922 132.022278 17435.544922 00:01
19 16673.498047 17428.867188 131.998352 17428.867188 00:02
20 16663.734375 17423.363281 131.978271 17423.363281 00:01
21 16654.568359 17422.289062 131.974945 17422.289062 00:01
22 16645.996094 17415.966797 131.951950 17415.966797 00:01
23 16637.943359 17416.625000 131.955109 17416.625000 00:01
24 16630.496094 17410.580078 131.933075 17410.580078 00:01
25 16623.593750 17409.496094 131.929550 17409.496094 00:01
26 16617.287109 17410.453125 131.933716 17410.453125 00:01
27 16611.566406 17413.537109 131.946030 17413.537109 00:01
28 16606.435547 17412.398438 131.941986 17412.398438 00:01
29 16601.904297 17410.878906 131.936615 17410.878906 00:01
30 16597.939453 17411.490234 131.938995 17411.490234 00:01
31 16594.519531 17410.371094 131.935089 17410.371094 00:01
32 16591.611328 17410.490234 131.935699 17410.490234 00:01
33 16589.177734 17410.302734 131.935104 17410.302734 00:01
34 16587.187500 17410.230469 131.934921 17410.230469 00:01
35 16585.595703 17410.242188 131.935013 17410.242188 00:01
36 16584.357422 17410.320312 131.935364 17410.320312 00:01
37 16583.423828 17410.513672 131.936142 17410.513672 00:01
38 16582.751953 17410.679688 131.936752 17410.679688 00:01
39 16582.281250 17410.781250 131.937164 17410.781250 00:01
Better model found at epoch 0 with valid_loss value: 17151.04296875.
wandb: Adding directory to artifact (/tmp/tmpwj0lweb8)... Done. 0.1s
save_training_run single-dsconfig-training-phase1_sweep/golden-sweep-34
save_training_run success
/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:410: UserWarning:

Bummer! Plotly can currently only draw Line2D objects from matplotlib that are in 'data' coordinates!

/opt/conda/lib/python3.6/site-packages/plotly/matplotlylib/renderer.py:512: UserWarning:

I found a path object that I don't think is part of a bar chart. Ignoring.


Waiting for W&B process to finish, PID 17596
Program ended successfully.
Find user logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201646-dhwflqpu/logs/debug.log
Find internal logs for this run at: /ws/forks/fastbook/wandb/run-20201230_201646-dhwflqpu/logs/debug-internal.log

Run summary:


epoch 40
train_loss 16582.28125
raw_loss 16091.3418
wd_0 0.01
sqr_mom_0 0.99
lr_0 0.0
mom_0 0.95
eps_0 1e-05
_step 760
_runtime 81
_timestamp 1609381087
valid_loss 17410.78125
mae 131.93716
mse 17410.78125

Run history:


epoch ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
train_loss ▁▆█▇██▇█▇▇▇▇▇▆▆▇▆▆▆▅▆▅▆▅▅▅▅▆▅▅▅▅▅▅▅▅▅▅▅▅
raw_loss █▃▁▇▃▇▃▂█▃█▇▂▆▃█▇▃▇▇▇▆▆▇▇▆▇▆▇▆▆▇▇▆▇▆▇▇▆▃
wd_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
sqr_mom_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
lr_0 ▂▂▂▃▄▅▆▇███████▇▇▇▇▆▆▆▅▅▅▄▄▄▃▃▃▂▂▂▂▁▁▁▁▁
mom_0 ██▇▆▅▄▃▂▁▁▁▁▁▁▁▂▂▂▂▃▃▃▄▄▄▅▅▅▆▆▆▇▇▇▇█████
eps_0 ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
_step ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_runtime ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
_timestamp ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▇▇▇▇▇▇███
valid_loss ▁▆▇████▇▇▇▇▇▇▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆
mae ▁▆▇████▇▇▇▇▇▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆
mse ▁▆▇████▇▇▇▇▇▇▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆

Synced 6 W&B file(s), 1 media file(s), 1 artifact file(s) and 2 other file(s)
wandb: Sweep Agent: Waiting for job.
wandb: Job received.
wandb: Agent Starting Run: abmwh988 with config:
wandb: 	checkpoint: single-dsconfig-training-phase0_sweep/0
wandb: 	div: 10
wandb: 	lr_max: 1e-05
wandb: 	n_epoch: 80
Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable
Tracking run with wandb version 0.10.12
Syncing run quiet-sweep-35 to Weights & Biases (Documentation).
Project page: https://wandb.ai/soellingeraj/ts-rnn
Sweep page: https://wandb.ai/soellingeraj/ts-rnn/sweeps/vbyp7u8r
Run page: https://wandb.ai/soellingeraj/ts-rnn/runs/abmwh988
Run data is saved locally in /ws/forks/fastbook/wandb/run-20201230_201852-abmwh988

load_existing_checkpoint single-dsconfig-training-phase0_sweep/0
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
/opt/conda/lib/python3.6/site-packages/fastai/learner.py:55: UserWarning:

Saved filed doesn't contain an optimizer state.

(run interrupted after 12 of 80 epochs)
epoch train_loss valid_loss mae mse time
0 16881.113281 17151.136719 130.959320 17151.136719 00:01
1 16878.820312 17446.955078 132.083435 17446.955078 00:01
2 16876.208984 17486.324219 132.229065 17486.324219 00:01
3 16873.048828 17526.808594 132.369080 17526.808594 00:01
4 16869.144531 17551.029297 132.448166 17551.029297 00:01
5 16864.316406 17553.990234 132.454880 17553.990234 00:01
6 16858.488281 17550.400391 132.439590 17550.400391 00:01
7 16851.685547 17542.806641 132.409210 17542.806641 00:01
8 16844.027344 17533.001953 132.370819 17533.001953 00:01
9 16835.484375 17523.636719 132.332626 17523.636719 00:01
10 16826.144531 17516.369141 132.304886 17516.369141 00:01
11 16816.181641 17510.248047 132.282364 17510.248047 00:01

Better model found at epoch 0 with valid_loss value: 17151.13671875.
learn.fit_one_cycle(10, lr_max=1e-3, div=50.,)
learn.fit_one_cycle(10, lr_max=1e-3, div=50.,)
learn.fit_one_cycle(10, lr_max=1e-3, div=50.,)
learn.fit_one_cycle(10, lr_max=1e-3, div=50.,)
learn.fit_one_cycle(10, lr_max=1e-3, div=50.,)
learn.fit_one_cycle(10, lr_max=0.1, div=50.,)
learn.fit_one_cycle(20, lr_max=0.1, div=50.,)
learn.fit_one_cycle(20, lr_max=0.1, div=50.,)
learn.fit_one_cycle(40, lr_max=slice(1e-07, 1e-06), div=50.,)
learn.fit_one_cycle(40, lr_max=slice(1e-07, 1e-06), div=50.,)
learn.fit_one_cycle(40, lr_max=slice(1e-04, 1e-1), div=50.,)
learn.fit_one_cycle(20,lr_max=slice(3e-07,9e-07), div=50.,)
learn.fit_one_cycle(20, lr_max=slice(1e-03, 1e-01), div=50.,)
WandbCallback requires use of "SaveModelCallback" to log best model
WandbCallback was not able to prepare a DataLoader for logging prediction samples -> tuple index out of range
epoch train_loss valid_loss mae mse time
0 16500.857422 17058.966797 130.454163 17058.966797 00:01
1 16435.394531 16334.189453 127.655075 16334.189453 00:01
2 16343.923828 15895.993164 125.941399 15895.993164 00:01
3 16242.678711 15843.721680 125.743126 15843.721680 00:01
4 16139.079102 16056.191406 126.584099 16056.191406 00:01
5 16039.528320 15964.089844 126.224045 15964.089844 00:01
6 15948.698242 15934.723633 126.103943 15934.723633 00:01
7 15871.872070 15880.710938 125.885681 15880.710938 00:01
8 15807.691406 15862.152344 125.809784 15862.152344 00:01
9 15753.535156 15828.784180 125.677429 15828.784180 00:01
wandb: WARNING Step must only increase in log calls.  Step 0 < 1; dropping {'epoch': 0.16666666666666666, 'train_loss': TensorBase(16534.7344), 'raw_loss': TensorBase(16534.7500), 'wd_0': 0.01, 'sqr_mom_0': 0.99, 'lr_0': 2e-05, 'mom_0': 0.95, 'eps_0': 1e-05}.
epoch train_loss valid_loss mae mse time
0 15470.530273 15807.198242 125.590584 15807.198242 00:01
1 15450.349609 15748.249023 125.350815 15748.249023 00:01
2 15397.788086 15369.669922 123.832458 15369.669922 00:01
3 15323.241211 14496.727539 120.258461 14496.727539 00:01
4 15241.073242 14810.000000 121.528625 14810.000000 00:01
5 15157.637695 14561.288086 120.511833 14561.288086 00:01
6 15073.031250 14484.727539 120.184296 14484.727539 00:01
7 15001.869141 14237.068359 119.154198 14237.068359 00:01
8 14941.365234 14286.781250 119.361732 14286.781250 00:01
9 14890.690430 14274.882812 119.311737 14274.882812 00:01
epoch train_loss valid_loss mae mse time
0 14604.194336 14319.080078 119.494308 14319.080078 00:01
1 14572.034180 14023.852539 118.248299 14023.852539 00:01
2 14517.897461 11494.937500 107.068962 11494.937500 00:01
3 14436.357422 11822.633789 108.572411 11822.633789 00:01
4 14340.881836 13002.192383 113.824600 13002.192383 00:01
5 14232.510742 13026.773438 113.895866 13026.773438 00:01
6 14134.999023 13047.250000 113.979614 13047.250000 00:01
7 14039.472656 12630.025391 112.135170 12630.025391 00:01
8 13951.371094 12370.928711 110.939743 12370.928711 00:01
9 13885.535156 12396.886719 111.074158 12396.886719 00:01
epoch train_loss valid_loss mae mse time
0 13545.776367 12378.215820 110.988564 12378.215820 00:01
1 13506.545898 11457.753906 106.783272 11457.753906 00:01
2 13425.323242 9944.270508 99.477539 9944.270508 00:01
3 13313.986328 9618.911133 97.676544 9618.911133 00:01
4 13187.502930 9595.845703 97.499611 9595.845703 00:01
5 13072.986328 9962.482422 99.214157 9962.482422 00:01
6 12960.182617 10250.379883 100.751534 10250.379883 00:01
7 12849.628906 9783.357422 98.511055 9783.357422 00:01
8 12765.044922 10108.954102 100.156601 10108.954102 00:01
9 12695.324219 10490.105469 102.018394 10490.105469 00:01
epoch train_loss valid_loss mae mse time
0 12330.806641 10802.211914 103.498978 10802.211914 00:01
1 12262.324219 10862.475586 103.793655 10862.475586 00:01
2 12160.761719 6844.447266 82.174149 6844.447266 00:01
3 12057.375977 8600.846680 91.995308 8600.846680 00:01
4 11919.283203 9083.191406 94.626465 9083.191406 00:01
5 11797.177734 8949.733398 93.899994 8949.733398 00:01
6 11677.922852 9467.136719 96.650116 9467.136719 00:01
7 11573.804688 9596.978516 97.406593 9596.978516 00:01
8 11483.841797 9797.712891 98.405647 9797.712891 00:01
9 11410.873047 9862.557617 98.694275 9862.557617 00:01

learn.save('best-Ar{}'.format(window_length))
learn.export('best-Ar{}-envoker'.format(window_length))
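# Note: learn.save stores weights for resuming training, while learn.export
# pickles the whole Learner (minus the data) for inference via load_learner.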

# 50 epochs
learn.lr_find(stop_div=False)
SuggestedLRs(lr_min=3.981071586167673e-07, lr_steep=9.12010818865383e-07)
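
# One way to act on the suggestion (hypothetical follow-up, not from the log):
# learn.fit_one_cycle(50, lr_max=slice(3.98e-07, 9.12e-07), div=50.)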

learn.save('100epochs')
learn.export('100epochs-envoker')
learn = learn.load('100epochs')

learn_e = load_learner('100epochs-envoker')

preds,targs = learn.get_preds()
preds_df = pd.DataFrame(preds.flatten(), columns=['preds'])
targs_df = pd.DataFrame(targs.flatten(), columns=['targs'])
results = pd.concat([preds_df, targs_df], axis=1)

preds.flatten()
tensor([253.2895, 127.3790, 128.3668,  ..., 125.5647, 125.4898, 125.4906])

itemify(X1, y1)[1][0]
array([[127.67810059]])