Commit 106a1e1

refactor: Removed contiguous param since it's included in torch>=1.7 (#756)

* refactor: Removed contiguous params

* ci: Removed contiguous params from CI

* refactor: Refactored loaders usage in training script

* refactor: Removed unused import
fg-mindee committed Dec 26, 2021
1 parent 807a392 commit 106a1e1
Showing 8 changed files with 11 additions and 46 deletions.
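For context, the recurring change below swaps the `contiguous-params` wrapper for a plain parameter list, since (per the commit title) the optimization it provided is built into torch>=1.7. A minimal before/after sketch of the optimizer setup; the `Linear` module and the `lr` value are hypothetical stand-ins for a doctr model and `args.lr`:

```python
import torch

model = torch.nn.Linear(10, 2)  # hypothetical stand-in for a doctr model

# Before: trainable parameters were wrapped to force a contiguous memory layout
#   from contiguous_params import ContiguousParams
#   params = ContiguousParams([p for p in model.parameters() if p.requires_grad]).contiguous()

# After: the trainable parameters go to the optimizer directly
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params, lr=1e-3, betas=(0.95, 0.99), eps=1e-6)
```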
3 changes: 0 additions & 3 deletions .github/workflows/references.yml
@@ -60,7 +60,6 @@ jobs:
python -m pip install --upgrade pip
pip install -e .[torch] --upgrade
pip install -r references/requirements.txt
-pip install contiguous-params
sudo apt-get update && sudo apt-get install fonts-freefont-ttf -y
- if: matrix.framework == 'tensorflow'
name: Train for a short epoch (TF)
@@ -121,7 +120,6 @@ jobs:
python -m pip install --upgrade pip
pip install -e .[torch] --upgrade
pip install -r references/requirements.txt
-pip install contiguous-params
- name: Download and extract toy set
run: |
wget https://github.com/mindee/doctr/releases/download/v0.3.1/toy_recogition_set-036a4d80.zip
@@ -240,7 +238,6 @@ jobs:
python -m pip install --upgrade pip
pip install -e .[torch] --upgrade
pip install -r references/requirements.txt
-pip install contiguous-params
- name: Download and extract toy set
run: |
wget https://github.com/mindee/doctr/releases/download/v0.3.1/toy_detection_set-bbbb4243.zip
5 changes: 0 additions & 5 deletions references/classification/README.md
@@ -11,11 +11,6 @@ pip install -e . --upgrade
pip install -r references/requirements.txt
```

-if you are using PyTorch back-end, there is an extra dependency (to optimize data loading):
-```shell
-pip install contiguous-params>=1.0.0
-```

## Usage

You can start your training in TensorFlow:
11 changes: 3 additions & 8 deletions references/classification/train_pytorch.py
@@ -15,7 +15,6 @@
import numpy as np
import torch
import wandb
-from contiguous_params import ContiguousParams
from fastprogress.fastprogress import master_bar, progress_bar
from torch.nn.functional import cross_entropy
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
@@ -105,10 +104,8 @@ def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, m
scaler = torch.cuda.amp.GradScaler()

model.train()
-train_iter = iter(train_loader)
# Iterate over the batches of the dataset
-for _ in progress_bar(range(len(train_loader)), parent=mb):
-images, targets = next(train_iter)
+for images, targets in progress_bar(train_loader, parent=mb):

if torch.cuda.is_available():
images = images.cuda()
@@ -141,8 +138,7 @@ def evaluate(model, val_loader, batch_transforms, amp=False):
model.eval()
# Validation loop
val_loss, correct, samples, batch_cnt = 0, 0, 0, 0
-val_iter = iter(val_loader)
-for images, targets in val_iter:
+for images, targets in val_loader:
images = batch_transforms(images)

if torch.cuda.is_available():
@@ -265,8 +261,7 @@ def main(args):
return

# Optimizer
-model_params = ContiguousParams([p for p in model.parameters() if p.requires_grad]).contiguous()
-optimizer = torch.optim.Adam(model_params, args.lr,
+optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], args.lr,
betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)

# LR Finder
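The other recurring refactor in the training scripts iterates each `DataLoader` directly instead of pairing `iter()` with `next()` and `range(len(...))`. A small runnable sketch of the new pattern, with toy tensors standing in for the real dataset:

```python
import torch
from fastprogress.fastprogress import master_bar, progress_bar
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-in for the script's train_loader
dataset = TensorDataset(torch.randn(8, 3), torch.randint(0, 2, (8,)))
train_loader = DataLoader(dataset, batch_size=2)

mb = master_bar(range(1))  # a single epoch
for _ in mb:
    # A DataLoader is directly iterable, so no iter()/next() bookkeeping
    # or range(len(train_loader)) indexing is needed
    for images, targets in progress_bar(train_loader, parent=mb):
        pass  # batch transforms and the forward/backward pass go here
```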
5 changes: 0 additions & 5 deletions references/detection/README.md
@@ -11,11 +11,6 @@ pip install -e . --upgrade
pip install -r references/requirements.txt
```

-if you are using PyTorch back-end, there is an extra dependency (to optimize data loading):
-```shell
-pip install contiguous-params>=1.0.0
-```

## Usage

You can start your training in TensorFlow:
10 changes: 3 additions & 7 deletions references/detection/train_pytorch.py
@@ -16,7 +16,6 @@
import numpy as np
import torch
import wandb
-from contiguous_params import ContiguousParams
from fastprogress.fastprogress import master_bar, progress_bar
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
@@ -107,9 +106,8 @@ def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, m
scaler = torch.cuda.amp.GradScaler()

model.train()
-train_iter = iter(train_loader)
# Iterate over the batches of the dataset
-for images, targets in progress_bar(train_iter, parent=mb):
+for images, targets in progress_bar(train_loader, parent=mb):

if torch.cuda.is_available():
images = images.cuda()
@@ -145,8 +143,7 @@ def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
-val_iter = iter(val_loader)
-for images, targets in val_iter:
+for images, targets in val_loader:
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
@@ -272,8 +269,7 @@ def main(args):
p.reguires_grad_(False)

# Optimizer
-model_params = ContiguousParams([p for p in model.parameters() if p.requires_grad]).contiguous()
-optimizer = torch.optim.Adam(model_params, args.lr,
+optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], args.lr,
betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
# LR Finder
if args.find_lr:
8 changes: 2 additions & 6 deletions references/obj_detection/train_pytorch.py
@@ -116,9 +116,8 @@ def fit_one_epoch(model, train_loader, optimizer, scheduler, mb, amp=False):
scaler = torch.cuda.amp.GradScaler()

model.train()
-train_iter = iter(train_loader)
# Iterate over the batches of the dataset
-for images, targets in progress_bar(train_iter, parent=mb):
+for images, targets in progress_bar(train_loader, parent=mb):

targets = convert_to_abs_coords(targets, images.shape)
if torch.cuda.is_available():
@@ -148,10 +147,7 @@ def fit_one_epoch(model, train_loader, optimizer, scheduler, mb, amp=False):
def evaluate(model, val_loader, metric, amp=False):
model.eval()
metric.reset()
-val_iter = iter(val_loader)
-for images, targets in val_iter:
-
-images, targets = next(val_iter)
+for images, targets in val_loader:
targets = convert_to_abs_coords(targets, images.shape)
if torch.cuda.is_available():
images = images.cuda()
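In this file the old `evaluate` loop was not just verbose but buggy: the `for` statement pulled one batch from `val_iter` and the `next(val_iter)` call inside the body pulled another, so every other validation batch was silently skipped (and an odd batch count would raise `StopIteration`). A toy illustration of both patterns, with strings standing in for real batches:

```python
batches = [(f"images_{i}", f"targets_{i}") for i in range(4)]

# Old pattern: each loop turn consumes two batches and keeps only the second
it = iter(batches)
seen = []
for images, targets in it:      # takes batches 0 and 2...
    images, targets = next(it)  # ...but evaluates batches 1 and 3
    seen.append(images)
print(seen)  # ['images_1', 'images_3']

# New pattern: every batch is evaluated exactly once
print([images for images, _ in batches])
# ['images_0', 'images_1', 'images_2', 'images_3']
```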
5 changes: 0 additions & 5 deletions references/recognition/README.md
@@ -11,11 +11,6 @@ pip install -e . --upgrade
pip install -r references/requirements.txt
```

-if you are using PyTorch back-end, there is an extra dependency (to optimize data loading):
-```shell
-pip install contiguous-params>=1.0.0
-```

## Usage

You can start your training in TensorFlow:
10 changes: 3 additions & 7 deletions references/recognition/train_pytorch.py
@@ -17,7 +17,6 @@
import numpy as np
import torch
import wandb
-from contiguous_params import ContiguousParams
from fastprogress.fastprogress import master_bar, progress_bar
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
@@ -108,9 +107,8 @@ def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, m
scaler = torch.cuda.amp.GradScaler()

model.train()
-train_iter = iter(train_loader)
# Iterate over the batches of the dataset
-for images, targets in progress_bar(train_iter, parent=mb):
+for images, targets in progress_bar(train_loader, parent=mb):

if torch.cuda.is_available():
images = images.cuda()
@@ -148,8 +146,7 @@ def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
-val_iter = iter(val_loader)
-for images, targets in val_iter:
+for images, targets in val_loader:
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
@@ -279,8 +276,7 @@ def main(args):
return

# Optimizer
-model_params = ContiguousParams([p for p in model.parameters() if p.requires_grad]).contiguous()
-optimizer = torch.optim.Adam(model_params, args.lr,
+optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], args.lr,
betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)
# LR Finder
if args.find_lr:
