From https://pytorch.org/, the following is stated for installing PyTorch on macOS:
    conda install pytorch torchvision -c pytorch
    # MacOS Binaries dont support CUDA, install from source if CUDA is needed

Why would one want to install PyTorch without CUDA enabled?
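(For reference, a quick way to check whether a given build was compiled with CUDA support; this is just a sanity check, not part of my script:)

    import torch

    # False on the CPU-only macOS conda binaries, even if a GPU is present
    print(torch.cuda.is_available())
    # CUDA toolkit version the binary was built against, or None for CPU-only builds
    print(torch.version.cuda)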
The reason I ask is that I receive this error:
    ---------------------------------------------------------------------------
    AssertionError                            Traceback (most recent call last)
    <ipython-input-...> in <module>()
         78 # predicted = outputs.data.max(1)[1]
         79
    ---> 80 output = model(torch.tensor([[1,1]]).float().cuda())
         81 predicted = output.data.max(1)[1]
         82

    ~/anaconda3/lib/python3.6/site-packages/torch/cuda/__init__.py in _lazy_init()
        159             raise RuntimeError(
        160                 "Cannot re-initialize CUDA in forked subprocess. " + msg)
    --> 161     _check_driver()
        162     torch._C._cuda_init()
        163     _cudart = _load_cudart()

    ~/anaconda3/lib/python3.6/site-packages/torch/cuda/__init__.py in _check_driver()
         73 def _check_driver():
         74     if not hasattr(torch._C, '_cuda_isDriverSufficient'):
    ---> 75         raise AssertionError("Torch not compiled with CUDA enabled")
         76     if not torch._C._cuda_isDriverSufficient():
         77         if torch._C._cuda_getDriverVersion() == 0:

    AssertionError: Torch not compiled with CUDA enabled
when attempting to execute the following code:
    import torch
    import torch.nn as nn
    import torch.utils.data as data_utils

    x = torch.tensor([[0,0], [0,1], [1,0]]).float()
    print(x)
    y = torch.tensor([0,1,1]).long()
    print(y)

    my_train = data_utils.TensorDataset(x, y)
    my_train_loader = data_utils.DataLoader(my_train, batch_size=2, shuffle=True)

    # Device configuration
    device = 'cpu'
    print(device)

    # Hyper-parameters
    input_size = 2
    hidden_size = 100
    num_classes = 2
    learning_rate = 0.001

    train_dataset = my_train
    train_loader = my_train_loader

    pred = []
    # model_iters and num_epochs are defined earlier in my notebook (values omitted here)
    for i in range(0, model_iters):
        # Fully connected neural network with one hidden layer
        class NeuralNet(nn.Module):
            def __init__(self, input_size, hidden_size, num_classes):
                super(NeuralNet, self).__init__()
                self.fc1 = nn.Linear(input_size, hidden_size)
                self.relu = nn.ReLU()
                self.fc2 = nn.Linear(hidden_size, num_classes)

            def forward(self, x):
                out = self.fc1(x)
                out = self.relu(out)
                out = self.fc2(out)
                return out

        model = NeuralNet(input_size, hidden_size, num_classes).to(device)

        # Loss and optimizer
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        # Train the model
        total_step = len(train_loader)
        for epoch in range(num_epochs):
            for i, (images, labels) in enumerate(train_loader):
                # Move tensors to the configured device
                images = images.reshape(-1, 2).to(device)
                labels = labels.to(device)

                # Forward pass
                outputs = model(images)
                loss = criterion(outputs, labels)

                # Backward and optimize
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

    output = model(torch.tensor([[1,1]]).float().cuda())

To fix this error, do I need to install PyTorch from source with CUDA already installed?
You set device = 'cpu' in your PyTorch script, but you also call:

    output = model(torch.tensor([[1,1]]).float().cuda())

which explicitly requests CUDA. Use device instead to achieve such compatibility: call .to(device) instead of .cuda(). Depending on the value of device, the GPU can then be used. Typically this is done like so:

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
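For completeness, here is a minimal sketch of how that pattern would look applied to the snippet in the question (it reuses the NeuralNet class and hyper-parameters defined there):

    import torch

    # Use the GPU only when a CUDA-enabled build and driver are available, otherwise fall back to the CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = NeuralNet(input_size, hidden_size, num_classes).to(device)

    # Move the input to the same device as the model instead of hard-coding .cuda()
    sample = torch.tensor([[1, 1]]).float().to(device)
    output = model(sample)
    predicted = output.data.max(1)[1]

This way the same script runs unchanged on a CPU-only build (such as the macOS conda binaries) and on a CUDA-enabled build.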