
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from fastai.vision import *"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import timm"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"#setup paths\n",
"path = Path(untar_data(URLs.CIFAR_100))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"#hyperparameters\n",
"learning_rate = 0.0005133\n",
"dropout = 0.4068\n",
"bs = 32\n",
"epochs=10"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"scrolled": true
},
"outputs": [
{
"ename": "TypeError",
"evalue": "new() received an invalid combination of arguments - got (bool, int), but expected one of:\n * (torch.device device)\n * (torch.Storage storage)\n * (Tensor other)\n * (tuple of ints size, torch.device device)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mbool\u001b[0m, \u001b[31;1mint\u001b[0m)\n * (object data, torch.device device)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mbool\u001b[0m, \u001b[31;1mint\u001b[0m)\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-5-7a35ae156aa7>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;31m#timm.models.blresnet_model(50,2,4)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m#timm.models.blresnext_model(50,4,32,2,4,pretrained = True)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[0mtimm\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mblseresnext_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m50\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m32\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32mG:\\Notebooks\\my-timm\\timm\\models\\blseresnext.py\u001b[0m in \u001b[0;36mblseresnext_model\u001b[1;34m(depth, basewidth, cardinality, alpha, beta, use_se, num_classes, pretrained)\u001b[0m\n\u001b[0;32m 299\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 300\u001b[0m model = bLSEResNeXt(Bottleneck, layers, basewidth, cardinality,\n\u001b[1;32m--> 301\u001b[1;33m alpha, beta, num_classes,use_se)\n\u001b[0m\u001b[0;32m 302\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mpretrained\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 303\u001b[0m url = model_urls['blseresnext-{}-{}x{}d-a{}-b{}'.format(depth, cardinality,\n",
"\u001b[1;32mG:\\Notebooks\\my-timm\\timm\\models\\blseresnext.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, block, layers, basewidth, cardinality, alpha, beta, use_se, num_classes)\u001b[0m\n\u001b[0;32m 199\u001b[0m num_channels[3] * block.expansion, layers[3], basewidth, cardinality, stride=2)\n\u001b[0;32m 200\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgappool\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mAdaptiveAvgPool2d\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 201\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfc\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnum_channels\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m*\u001b[0m \u001b[0mblock\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexpansion\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnum_classes\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 202\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 203\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mm\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodules\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\fastai_v1\\lib\\site-packages\\torch\\nn\\modules\\linear.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, in_features, out_features, bias)\u001b[0m\n\u001b[0;32m 70\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0min_features\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0min_features\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 71\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mout_features\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mout_features\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 72\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mParameter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mout_features\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0min_features\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 73\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mbias\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 74\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mParameter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mout_features\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mTypeError\u001b[0m: new() received an invalid combination of arguments - got (bool, int), but expected one of:\n * (torch.device device)\n * (torch.Storage storage)\n * (Tensor other)\n * (tuple of ints size, torch.device device)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mbool\u001b[0m, \u001b[31;1mint\u001b[0m)\n * (object data, torch.device device)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mbool\u001b[0m, \u001b[31;1mint\u001b[0m)\n"
]
}
],
"source": [
"#models\n",
"#model = models.resnet50\n",
"#timm.models.blresnet_model(50,2,4)\n",
"#timm.models.blresnext_model(50,4,32,2,4,pretrained = True)\n",
"timm.models.blseresnext_model(50,4,32,2,4)"
]
},
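{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `TypeError` above is a library-side bug visible in the traceback: `blseresnext_model` calls `bLSEResNeXt(Bottleneck, layers, basewidth, cardinality, alpha, beta, num_classes, use_se)`, while the constructor is declared as `__init__(self, block, layers, basewidth, cardinality, alpha, beta, use_se, num_classes)`. The boolean `use_se` therefore lands in the constructor's `num_classes` slot and reaches `nn.Linear` as `out_features`, producing the `(bool, int)` argument error. A minimal sketch of the fix in `timm/models/blseresnext.py`, assuming only the signatures shown in the traceback, is to pass the trailing arguments by keyword:\n",
"\n",
"```python\n",
"# pass use_se/num_classes by keyword so their order cannot be swapped\n",
"model = bLSEResNeXt(Bottleneck, layers, basewidth, cardinality,\n",
"                    alpha, beta, use_se=use_se, num_classes=num_classes)\n",
"```"
]
},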
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=25, max_lighting=0.1, max_zoom=1.05, max_warp=0.9)\n",
"\n",
"data = (ImageList.from_folder(path)\n",
".split_by_rand_pct()\n",
".label_from_folder()\n",
".transform(get_transforms())\n",
".databunch()\n",
".normalize(imagenet_stats))\n",
"\n",
"learn = cnn_learner(data,\n",
"model,\n",
"pretrained=False,\n",
"ps=dropout,\n",
"metrics=accuracy).to_fp16()\n",
"\n",
"learn.fit_one_cycle(epochs, max_lr=learning_rate)"
]
},
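{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `learning_rate` above looks like the output of an earlier sweep. As a sanity check it can be re-derived with fastai v1's LR finder; a minimal sketch, assuming the learner above was built without errors:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sweep learning rates and plot loss vs. lr to confirm max_lr\n",
"learn.lr_find()\n",
"learn.recorder.plot()"
]
},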
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"learn.save(inity1)\n",
"learn.export(init1y.pkl)"
]
},
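{
"cell_type": "markdown",
"metadata": {},
"source": [
"To confirm the export round-trips, the learner can be reloaded for inference with fastai v1's `load_learner`; a sketch, assuming the cell above ran and wrote `init1.pkl` under `path`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# reload the exported learner; load_learner(path, file) reads path/file\n",
"learn_inf = load_learner(path, 'init1.pkl')"
]
},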
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [conda env:fastai_v1] *",
"language": "python",
"name": "conda-env-fastai_v1-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}