@@ -807,20 +807,21 @@ static struct vmbus_xfer_pages * vmbus_xfer_pages ( struct vmbus_device *vmdev,
 }
 
 /**
- * Construct I/O buffer from transfer pages
+ * Construct I/O buffer list from transfer pages
  *
  * @v vmdev		VMBus device
  * @v header		Transfer page header
- * @ret iobuf		I/O buffer, or NULL on error
+ * @v list		I/O buffer list to populate
+ * @ret rc		Return status code
  */
-static struct io_buffer *
-vmbus_xfer_page_iobuf ( struct vmbus_device *vmdev,
-			struct vmbus_packet_header *header ) {
+static int vmbus_xfer_page_iobufs ( struct vmbus_device *vmdev,
+				    struct vmbus_packet_header *header,
+				    struct list_head *list ) {
 	struct vmbus_xfer_page_header *page_header =
 		container_of ( header, struct vmbus_xfer_page_header, header );
 	struct vmbus_xfer_pages *pages;
 	struct io_buffer *iobuf;
-	size_t total_len;
+	struct io_buffer *tmp;
 	size_t len;
 	size_t offset;
 	unsigned int range_count;
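
With the per-range coalescing gone, the total payload length that the old code computed up front is now implicit in the list: a caller that still needs it can sum iob_len() over the buffers. A minimal sketch using iPXE's I/O buffer and list helpers (the helper name is illustrative and not part of this patch):

```c
#include <ipxe/iobuf.h>
#include <ipxe/list.h>

/**
 * Calculate total length of an I/O buffer list (illustrative helper)
 *
 * @v list		I/O buffer list
 * @ret total_len	Total length of all buffers on the list
 */
static size_t iob_list_len ( struct list_head *list ) {
	struct io_buffer *iobuf;
	size_t total_len = 0;

	/* Sum the occupied length of each buffer on the list */
	list_for_each_entry ( iobuf, list, list )
		total_len += iob_len ( iobuf );

	return total_len;
}
```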
@@ -832,28 +833,32 @@ vmbus_xfer_page_iobuf ( struct vmbus_device *vmdev,
 
 	/* Locate page set */
 	pages = vmbus_xfer_pages ( vmdev, page_header->pageset );
-	if ( ! pages )
+	if ( ! pages ) {
+		rc = -ENOENT;
 		goto err_pages;
-
-	/* Determine total length */
-	range_count = le32_to_cpu ( page_header->range_count );
-	for ( total_len = 0, i = 0 ; i < range_count ; i++ ) {
-		len = le32_to_cpu ( page_header->range[i].len );
-		total_len += len;
 	}
 
-	/* Allocate I/O buffer */
-	iobuf = alloc_iob ( total_len );
-	if ( ! iobuf ) {
-		DBGC ( vmdev, "VMBUS %s could not allocate %zd-byte I/O "
-		       "buffer\n", vmdev->dev.name, total_len );
-		goto err_alloc;
-	}
-
-	/* Populate I/O buffer */
+	/* Allocate and populate I/O buffers */
+	range_count = le32_to_cpu ( page_header->range_count );
 	for ( i = 0 ; i < range_count ; i++ ) {
+
+		/* Parse header */
 		len = le32_to_cpu ( page_header->range[i].len );
 		offset = le32_to_cpu ( page_header->range[i].offset );
+
+		/* Allocate I/O buffer */
+		iobuf = alloc_iob ( len );
+		if ( ! iobuf ) {
+			DBGC ( vmdev, "VMBUS %s could not allocate %zd-byte "
+			       "I/O buffer\n", vmdev->dev.name, len );
+			rc = -ENOMEM;
+			goto err_alloc;
+		}
+
+		/* Add I/O buffer to list */
+		list_add ( &iobuf->list, list );
+
+		/* Populate I/O buffer */
 		if ( ( rc = pages->op->copy ( pages, iob_put ( iobuf, len ),
 					      offset, len ) ) != 0 ) {
 			DBGC ( vmdev, "VMBUS %s could not populate I/O buffer "
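
Each range is now pulled out of the transfer pages individually via the page set's copy operation. Judging from the call site, that operation takes the page set, a destination pointer, an offset and a length, and returns a status code. The sketch below shows what an implementation might look like for a page set backed by one contiguous buffer; the flat_pageset type is hypothetical, and the exact prototype and operations-table name should be checked against ipxe/vmbus.h:

```c
#include <string.h>
#include <errno.h>
#include <ipxe/vmbus.h>

/** A page set backed by one contiguous buffer (hypothetical) */
struct flat_pageset {
	/** Generic transfer page set */
	struct vmbus_xfer_pages pages;
	/** Backing data */
	void *data;
	/** Length of backing data */
	size_t len;
};

/**
 * Copy data from a flat page set (illustrative implementation)
 *
 * @v pages		Transfer page set
 * @v data		Destination buffer
 * @v offset		Offset within page set
 * @v len		Length to copy
 * @ret rc		Return status code
 */
static int flat_pageset_copy ( struct vmbus_xfer_pages *pages, void *data,
			       size_t offset, size_t len ) {
	struct flat_pageset *flat =
		container_of ( pages, struct flat_pageset, pages );

	/* Refuse ranges lying outside the backing buffer */
	if ( ( offset > flat->len ) || ( len > ( flat->len - offset ) ) )
		return -EINVAL;

	memcpy ( data, ( flat->data + offset ), len );
	return 0;
}

/** Operations table wiring up the copy method (assumed structure name) */
static struct vmbus_xfer_pages_operations flat_pageset_operations = {
	.copy = flat_pageset_copy,
};
```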
@@ -863,13 +868,16 @@ vmbus_xfer_page_iobuf ( struct vmbus_device *vmdev,
 		}
 	}
 
-	return iobuf;
+	return 0;
 
  err_copy:
-	free_iob ( iobuf );
  err_alloc:
+	list_for_each_entry_safe ( iobuf, tmp, list, list ) {
+		list_del ( &iobuf->list );
+		free_iob ( iobuf );
+	}
  err_pages:
-	return NULL;
+	return rc;
 }
 
 /**
@@ -880,7 +888,7 @@ vmbus_xfer_page_iobuf ( struct vmbus_device *vmdev,
  */
 int vmbus_poll ( struct vmbus_device *vmdev ) {
 	struct vmbus_packet_header *header = vmdev->packet;
-	struct io_buffer *iobuf;
+	struct list_head list;
 	void *data;
 	size_t header_len;
 	size_t len;
@@ -929,6 +937,14 @@ int vmbus_poll ( struct vmbus_device *vmdev ) {
 	DBGC2_HDA ( vmdev, old_cons, header, ring_len );
 	assert ( ( ( cons - old_cons ) & ( vmdev->in_len - 1 ) ) == ring_len );
 
+	/* Allocate I/O buffers, if applicable */
+	INIT_LIST_HEAD ( &list );
+	if ( header->type == cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) ) {
+		if ( ( rc = vmbus_xfer_page_iobufs ( vmdev, header,
+						     &list ) ) != 0 )
+			return rc;
+	}
+
 	/* Update consumer index */
 	rmb();
 	vmdev->in->cons = cpu_to_le32 ( cons );
@@ -948,12 +964,8 @@ int vmbus_poll ( struct vmbus_device *vmdev ) {
 		break;
 
 	case cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) :
-		iobuf = vmbus_xfer_page_iobuf ( vmdev, header );
-		/* Call recv_data() even if I/O buffer allocation
-		 * failed, to allow for completions to be sent.
-		 */
 		if ( ( rc = vmdev->op->recv_data ( vmdev, xid, data, len,
-						   iob_disown ( iobuf ) ) )!=0){
+						   &list ) ) != 0 ) {
 			DBGC ( vmdev, "VMBUS %s could not handle data packet: "
 			       "%s\n", vmdev->dev.name, strerror ( rc ) );
 			return rc;
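
With iob_disown() gone from the call, ownership of every buffer on the list passes to the recv_data handler, which must consume or free them; any early exit needs the same unwind idiom used in vmbus_xfer_page_iobufs. Below is a sketch of a handler written against this contract, with parameter types inferred from the call site rather than taken from ipxe/vmbus.h; it simply accounts for and discards the buffers:

```c
#include <stdint.h>
#include <ipxe/iobuf.h>
#include <ipxe/list.h>
#include <ipxe/vmbus.h>

/**
 * Handle received transfer-page data (illustrative handler)
 *
 * @v vmdev		VMBus device
 * @v xid		Transaction ID
 * @v data		Data
 * @v len		Length of data
 * @v list		List of I/O buffers
 * @ret rc		Return status code
 */
static int example_recv_data ( struct vmbus_device *vmdev, uint64_t xid,
			       const void *data, size_t len,
			       struct list_head *list ) {
	struct io_buffer *iobuf;
	struct io_buffer *tmp;
	size_t total = 0;

	( void ) xid;
	( void ) data;
	( void ) len;

	/* The handler owns every buffer on the list: consume each one
	 * (here, just tally and free) and leave the list empty.
	 */
	list_for_each_entry_safe ( iobuf, tmp, list, list ) {
		total += iob_len ( iobuf );
		list_del ( &iobuf->list );
		free_iob ( iobuf );
	}
	DBGC ( vmdev, "VMBUS %s received %zd bytes via transfer pages\n",
	       vmdev->dev.name, total );

	return 0;
}
```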