
# torchmerge


Merge multiple models or embeddings into a single one very easily in PyTorch.
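For illustration only (this is not the torchmerge API), here is a minimal hand-rolled PyTorch sketch of the same idea, the 'cat'-style merge used in the example below: two pretrained backbones each produce an embedding, the embeddings are concatenated, and a small merge MLP plus a task head sits on top. The class name `ConcatMerge` and the layer sizes are assumptions chosen to mirror the example that follows.

```python
import torch
import torch.nn as nn
from torchvision import models

class ConcatMerge(nn.Module):
    """Illustrative sketch: concatenate two backbone embeddings, then merge MLP + task head."""
    def __init__(self, backbone_a, backbone_b, dim_a, dim_b):
        super().__init__()
        self.backbone_a, self.backbone_b = backbone_a, backbone_b
        self.merge = nn.Sequential(nn.Linear(dim_a + dim_b, 1024), nn.ReLU(),
                                   nn.Linear(1024, 768), nn.ReLU())
        self.head  = nn.Sequential(nn.Linear(768, 128), nn.ReLU(), nn.Linear(128, 1))

    def forward(self, x):
        emb = torch.cat([self.backbone_a(x), self.backbone_b(x)], dim=1)
        return self.head(self.merge(emb))

backbone_a = models.resnet18(pretrained=True)
backbone_b = models.resnet50(pretrained=True)
dim_a, dim_b = backbone_a.fc.in_features, backbone_b.fc.in_features   # 512, 2048
backbone_a.fc, backbone_b.fc = nn.Identity(), nn.Identity()           # expose raw embeddings

model = ConcatMerge(backbone_a, backbone_b, dim_a, dim_b)
out = model(torch.randn(4, 3, 224, 224))   # shape: (4, 1)
```

The full example below builds a similar network through torchmerge's config-driven API instead of wiring the modules by hand.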





## Usage

Colab notebook: https://colab.research.google.com/drive/1vOFxEcLQdgCxCCJCkp-mxouTQ1f8F5FX?usp=sharing



### Short example

import os, random, copy, collections
from copy import deepcopy

import numpy as np
import pandas as pd
from box import Box

import torch
import torch.nn as nn
import torchvision

##### only this import is specific to this package
from torchmerge import merge as me


#############################################################################################
def test3d():
   from box import Box
   from copy import deepcopy
   from torch.utils.data import DataLoader, TensorDataset
   

   ARG = Box({
       'MODE'   : 'mode1',
       'DATASET': {},
       'MODEL_INFO' : {},
   })
   PARAMS = {}


   ##################################################################
   if ARG.MODE == 'mode1':
       ARG.MODEL_INFO.TYPE = 'dataonly' 
       train_config                           = Box({})
       train_config.LR                        = 0.001
       train_config.SEED                      = 42
       train_config.DEVICE                    = 'cpu'
       train_config.BATCH_SIZE                = 64
       train_config.EPOCHS                    = 1
       train_config.EARLY_STOPPING_THLD       = 10
       train_config.VALID_FREQ                = 1
       train_config.SAVE_FILENAME             = './model.pt'
       train_config.TRAIN_RATIO               = 0.7
       train_config.VAL_RATIO                 = 0.2
       train_config.TEST_RATIO                = 0.1


   ####################################################################
   def load_DataFrame():
       return None


   def test_dataset_f_mnist(samples=100):
       from sklearn.model_selection import train_test_split
       from torchvision import transforms, datasets
       # Generate the transformations
       train_list_transforms = [transforms.ToTensor(),transforms.Lambda(lambda x: x.repeat(3, 1, 1))]

       dataset1 = datasets.FashionMNIST(root="data",train=True,
                                        transform=transforms.Compose(train_list_transforms),download=True,)
       
       #Sample the required number of examples from the dataset
       dataset1 = torch.utils.data.Subset(dataset1, np.arange(samples))
       X, Y = [], []
       for data, targets in dataset1:
           X.append(data)
           Y.append(targets)

       #Convert the lists to tensors
       X, y = torch.stack(X), torch.Tensor(Y)

       train_r, test_r, val_r  = train_config.TRAIN_RATIO, train_config.TEST_RATIO,train_config.VAL_RATIO
       train_X, test_X, train_y, test_y = train_test_split(X,  y,  test_size=1 - train_r)
       valid_X, test_X, valid_y, test_y = train_test_split(test_X, test_y, test_size= test_r / (test_r + val_r))
       return (train_X, train_y, valid_X, valid_y, test_X , test_y)


   def prepro_dataset(self, df: pd.DataFrame = None):
       train_X, train_y, valid_X, valid_y, test_X, test_y = test_dataset_f_mnist(samples=100)
       return train_X, train_y, valid_X, valid_y, test_X, test_y
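
   # load_DataFrame and prepro_dataset are passed to model.training() below
   # as the data-loading / preprocessing callbacks.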


   
   ### modelA  ########################################################
   from torchvision import models
   model_ft = models.resnet18(pretrained=True)
   embA_dim = model_ft.fc.in_features   ### 512 for resnet18

   ARG.modelA               = {}
   ARG.modelA.name          = 'resnet18'
   ARG.modelA.nn_model      = model_ft
   ARG.modelA.layer_emb_id  = 'fc'
   ARG.modelA.architect     = [embA_dim]   ### head sizes
   modelA = me.model_create(ARG.modelA)
   


   ### modelB  ########################################################
   from torchvision import models
   model_ft = models.resnet50(pretrained=True)
   embB_dim = int(model_ft.fc.in_features)   ### 2048 for resnet50

   ARG.modelB               = {}
   ARG.modelB.name          = 'resnet50'
   ARG.modelB.nn_model      = model_ft
   ARG.modelB.layer_emb_id  = 'fc'
   ARG.modelB.architect     = [embB_dim]   ### head sizes
   modelB = me.model_create(ARG.modelB)




   ### merge_model  ###################################################
   ### EXPLICIT DEPENDENCY  
   ARG.merge_model           = {}
   ARG.merge_model.name      = 'modelmerge1'

   ARG.merge_model.architect                  = {}
   ARG.merge_model.architect.input_dim        =  embA_dim + embB_dim 

   ARG.merge_model.architect.merge_type       = 'cat'
   ARG.merge_model.architect.merge_layers_dim = [1024, 768]  ### Common embedding is 768
   ARG.merge_model.architect.merge_custom     = None


   ### Custom head
   ARG.merge_model.architect.head_layers_dim  = [ 128, 1]    ### Specific task    
   ARG.merge_model.architect.head_custom      = None
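
   ### Resulting merged network (assuming the 'cat' merge chains these dims):
   ###   concat(512 + 2048 = 2560) -> 1024 -> 768 (common embedding) -> 128 -> 1 (task output)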
 
 
   ARG.merge_model.dataset       = {}
   ARG.merge_model.dataset.dirin = "/"
   ARG.merge_model.dataset.coly = 'ytarget'
   ARG.merge_model.train_config  = train_config


   model = me.MergeModel_create(ARG, model_create_list=[modelA, modelB])
   model.build()



   #### Run Model   ###################################################
   model.training(load_DataFrame, prepro_dataset) 

   model.save_weight('ztmp/model_x5.pt')
   model.load_weights('ztmp/model_x5.pt')
   inputs = torch.randn((train_config.BATCH_SIZE,3,28,28)).to(model.device)
   outputs = model.predict(inputs)
   print(outputs)
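   ### With head_layers_dim ending in 1, outputs is expected to have shape (BATCH_SIZE, 1).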












